Dataset columns (with the viewer's value-length / range statistics):

| Column | Type | Min | Max |
|---|---|---|---|
| entry_point | string (length) | 1 | 65 |
| original_triton_code | string (length) | 4.5k | 619k |
| python_code | string (length) | 208 | 60.9k |
| triton_code | string (length) | 1.15k | 275k |
| repo_name | string (length) | 7 | 115 |
| module_name | string (length) | 1 | 65 |
| synthetic | bool (1 class) | | |
| uuid | int64 | 0 | 18.5k |
| licenses | sequence (length) | 1 | 6 |
| stars | int64 | 0 | 19.8k |
| sha | string (length) | 40 | 40 |
| repo_link | string (length) | 72 | 180 |

Each record below starts with its entry_point, gives the original_triton_code, python_code, and triton_code fields in full, and ends with a `|`-separated metadata row: repo_name | module_name | synthetic | uuid | licenses | stars | sha | repo_link.
ImgSenRanking | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/w5/cw5vvkfcjyk5vbg23gp3mvqvfkh6smcdvibnigzm65mqweiwnzsd.py
# Topologically Sorted Source Nodes: [norm, clamp, l2_inp, expand_as], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div, aten.expand]
# Source node to ATen node mapping:
# clamp => clamp_min
# expand_as => expand
# l2_inp => div
# norm => pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_3, 2.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-12), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_3, %clamp_min), kwargs = {})
# %expand : [num_users=1] = call_function[target=torch.ops.aten.expand.default](args = (%div, [4, 4, 4, 4]), kwargs = {})
triton_poi_fused_clamp_div_expand_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_clamp_div_expand_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_expand_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_expand_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')
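# [Editor note] Reading aid: for the contiguous (4, 4, 4, 4) input with strides
# (64, 16, 4, 1), x0 indexes the 16 spatial positions, x2 the batch, and the
# four loads at offsets 0/16/32/48 walk dim=1. The kernel therefore computes
#     out = x / x.pow(2).sum(dim=1, keepdim=True).sqrt().clamp(min=1e-12)
# i.e. the L2 normalization spelled out in the graph fragment above.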
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [img_vec], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sent_vec], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm, clamp, l2_inp, expand_as], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div, aten.expand]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_div_expand_linalg_vector_norm_0.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm_1, clamp_1, l2_inp_1, expand_as_1], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div, aten.expand]
triton_poi_fused_clamp_div_expand_linalg_vector_norm_0.run(buf0, buf3, 256, grid=grid(256), stream=stream0)
return (buf2, buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
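For context (editor addition, not part of the record): wrapper files like the one above are what TorchInductor emits for a traced forward pass. A hedged sketch of one way to reproduce such a dump, assuming a recent PyTorch build with CUDA and the eager `ImgSenRanking` module from the `python_code` field below:

```python
import torch

model = ImgSenRanking(dim_image=4, sent_dim=4, hid_dim=4).cuda()
compiled = torch.compile(model)
sent = torch.rand(4, 4, 4, 4, device='cuda')
img = torch.rand(4, 4, 4, 4, device='cuda')
compiled(sent, img)
# Running with TORCH_COMPILE_DEBUG=1 set in the environment makes Inductor
# dump the generated wrapper and Triton kernels (output_code.py) under
# torch_compile_debug/.
```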
| import torch
import numpy as np
import torch.utils.data
def l2norm(input, p=2.0, dim=1, eps=1e-12):
"""
Compute L2 norm, row-wise
"""
l2_inp = input / input.norm(p, dim, keepdim=True).clamp(min=eps)
return l2_inp.expand_as(input)
def xavier_weight(tensor):
nin, nout = tensor.size()[0], tensor.size()[1]
r = np.sqrt(6.0) / np.sqrt(nin + nout)
return tensor.normal_(0, r)
class ImgSenRanking(torch.nn.Module):
def __init__(self, dim_image, sent_dim, hid_dim):
super(ImgSenRanking, self).__init__()
self.register_buffer('device_id', torch.IntTensor(1))
self.linear_img = torch.nn.Linear(dim_image, hid_dim)
self.linear_sent = torch.nn.Linear(sent_dim, hid_dim)
self.init_weights()
def init_weights(self):
xavier_weight(self.linear_img.weight.data)
xavier_weight(self.linear_sent.weight.data)
self.linear_img.bias.data.fill_(0)
self.linear_sent.bias.data.fill_(0)
def forward(self, sent, img):
img_vec = self.linear_img(img)
sent_vec = self.linear_sent(sent)
return l2norm(sent_vec), l2norm(img_vec)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_image': 4, 'sent_dim': 4, 'hid_dim': 4}]
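A note on `l2norm` above (editor sketch): with its default arguments it matches `torch.nn.functional.normalize`, and the trailing `expand_as` is a no-op since the division already broadcasts back to `input`'s shape:

```python
import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
# l2norm as defined above; F.normalize computes v / max(||v||_2, eps)
assert torch.allclose(l2norm(x), F.normalize(x, p=2.0, dim=1, eps=1e-12))
```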
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_div_expand_linalg_vector_norm_0(in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_expand_linalg_vector_norm_0[grid(256)](buf1,
buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clamp_div_expand_linalg_vector_norm_0[grid(256)](buf0,
buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf2, buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf1
def l2norm(input, p=2.0, dim=1, eps=1e-12):
"""
Compute L2 norm, row-wise
"""
l2_inp = input / input.norm(p, dim, keepdim=True).clamp(min=eps)
return l2_inp.expand_as(input)
def xavier_weight(tensor):
nin, nout = tensor.size()[0], tensor.size()[1]
r = np.sqrt(6.0) / np.sqrt(nin + nout)
return tensor.normal_(0, r)
class ImgSenRankingNew(torch.nn.Module):
def __init__(self, dim_image, sent_dim, hid_dim):
super(ImgSenRankingNew, self).__init__()
self.register_buffer('device_id', torch.IntTensor(1))
self.linear_img = torch.nn.Linear(dim_image, hid_dim)
self.linear_sent = torch.nn.Linear(sent_dim, hid_dim)
self.init_weights()
def init_weights(self):
xavier_weight(self.linear_img.weight.data)
xavier_weight(self.linear_sent.weight.data)
self.linear_img.bias.data.fill_(0)
self.linear_sent.bias.data.fill_(0)
def forward(self, input_0, input_1):
primals_1 = self.linear_img.weight
primals_2 = self.linear_img.bias
primals_4 = self.linear_sent.weight
primals_5 = self.linear_sent.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
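A smoke test for the compiled wrapper (editor sketch; requires a CUDA device, since `call` allocates CUDA buffers). Whatever the input-to-branch mapping, both outputs come out unit-L2-normalized along dim 1:

```python
import torch

model = ImgSenRankingNew(dim_image=4, sent_dim=4, hid_dim=4).cuda()
a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
for out in model(a, b):
    norms = out.pow(2).sum(dim=1).sqrt()
    assert torch.allclose(norms, torch.ones_like(norms), atol=1e-5)
```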
| ypxie/HDGan | ImgSenRanking | false | 16,770 | ["MIT"] | 160 | d98e2a85f7ae6ce7bfacd1c15e519558d97cb931 | https://github.com/ypxie/HDGan/tree/d98e2a85f7ae6ce7bfacd1c15e519558d97cb931 |
Seedloss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/qz/cqza6p5fjiie2hfiu5dfjqqugrnzziwuwxzlhzy2aa7khopxjbym.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
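# [Editor note] The max-subtraction above is the standard numerically stable
# softmax: shifting by the per-position maximum over dim=1 cannot change the
# final probabilities, since the factor exp(-max) cancels between numerator
# and denominator, but it prevents exp() from overflowing.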
# kernel path: runs/run_shard_0/inductor_cache/y4/cy4eivb6l6uycxihmryhhq3gw5ndggm6gg6767dd3rwqqobry63m.py
# Topologically Sorted Source Nodes: [softmax, probs], Original ATen: [aten._softmax, aten.clamp]
# Source node to ATen node mapping:
# probs => clamp_max, clamp_min
# softmax => div, sum_3
# Graph fragment:
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_3), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%div, 1e-05), kwargs = {})
# %clamp_max : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1), kwargs = {})
triton_poi_fused__softmax_clamp_1 = async_compile.triton('triton_poi_fused__softmax_clamp_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_clamp_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_clamp_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 1e-05
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp11 = 1.0
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wc/cwczefmqeaent7obz4w6vr2mfdnf6wy2nvwrvwdvjkjwsclkrqy2.py
# Topologically Sorted Source Nodes: [sum_3, probs_1, input_log_prob], Original ATen: [aten.sum, aten.div, aten.log]
# Source node to ATen node mapping:
# input_log_prob => log
# probs_1 => div_1
# sum_3 => sum_4
# Graph fragment:
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_max, [1], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, %sum_4), kwargs = {})
# %log : [num_users=2] = call_function[target=torch.ops.aten.log.default](args = (%div_1,), kwargs = {})
triton_poi_fused_div_log_sum_2 = async_compile.triton('triton_poi_fused_div_log_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_log_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_log_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = tl_math.log(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/3y/c3ykvhe2wn4im3mu4kndhbrw7wd54fmlvaac7q5kabmyj3gmwkth.py
# Topologically Sorted Source Nodes: [mul_2, loss_bg, sum_6, bg_count], Original ATen: [aten.mul, aten.sum, aten.add]
# Source node to ATen node mapping:
# bg_count => add_1
# loss_bg => sum_9
# mul_2 => mul_2
# sum_6 => sum_7
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_14, %slice_10), kwargs = {})
# %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1, 2, 3]), kwargs = {})
# %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%slice_14, [1, 2, 3]), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_7, 1e-05), kwargs = {})
triton_per_fused_add_mul_sum_3 = async_compile.triton('triton_per_fused_add_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_sum_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mul_sum_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = 1e-05
tmp12 = tmp10 + tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp12, xmask)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qg/cqggpcpuwohgb7xmpl5ytjmzletaqz37uobvvnlmdvkjcruf647d.py
# Topologically Sorted Source Nodes: [mul, loss_fg, sum_5, fg_count], Original ATen: [aten.mul, aten.sum, aten.add]
# Source node to ATen node mapping:
# fg_count => add
# loss_fg => sum_8
# mul => mul
# sum_5 => sum_6
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_6, %slice_2), kwargs = {})
# %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1, 2, 3]), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%slice_6, [1, 2, 3]), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_6, 1e-05), kwargs = {})
triton_per_fused_add_mul_sum_4 = async_compile.triton('triton_per_fused_add_mul_sum_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_sum_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mul_sum_4(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 48
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + r1 + (64*x0)), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (16 + r1 + (64*x0)), rmask & xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(rmask & xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = 1e-05
tmp12 = tmp10 + tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp12, xmask)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/mc/cmcgbdqbdpzlaih7oczo6f5b6omcm4pcjtcsrnqnaxci5f2gbrme.py
# Topologically Sorted Source Nodes: [truediv_2, mean_1, loss_bg_1, truediv_1, mean, loss_fg_1, total_loss, isnan_2, sum_9, eq_3], Original ATen: [aten.div, aten.mean, aten.mul, aten.add, aten.isnan, aten.sum, aten.eq]
# Source node to ATen node mapping:
# eq_3 => eq_3
# isnan_2 => isnan_2
# loss_bg_1 => mul_3
# loss_fg_1 => mul_1
# mean => mean
# mean_1 => mean_1
# sum_9 => sum_10
# total_loss => add_2
# truediv_1 => div_2
# truediv_2 => div_3
# Graph fragment:
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_9, %add_1), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div_3,), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_1, -1), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_8, %add), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div_2,), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, -1), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_1), kwargs = {})
# %isnan_2 : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%add_2,), kwargs = {})
# %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%isnan_2,), kwargs = {})
# %eq_3 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%sum_10, 0), kwargs = {})
triton_per_fused_add_div_eq_isnan_mean_mul_sum_5 = async_compile.triton('triton_per_fused_add_div_eq_isnan_mean_mul_sum_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*i1', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {8: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=(8,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_eq_isnan_mean_mul_sum_5', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_eq_isnan_mean_mul_sum_5(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp6 = tl.load(in_ptr2 + (r0), None)
tmp7 = tl.load(in_ptr3 + (r0), None)
tmp2 = tmp0 / tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp8 = tmp6 / tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = -1.0
tmp15 = tmp13 * tmp14
tmp16 = tmp5 / tmp12
tmp17 = tmp16 * tmp14
tmp18 = tmp15 + tmp17
tmp19 = libdevice.isnan(tmp18).to(tl.int1)
tmp20 = tmp19.to(tl.int64)
tmp21 = tl.full([1, 1], 0, tl.int64)
tmp22 = tmp20 == tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp15, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp17, None)
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp18, None)
tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp22, None)
''', device_str='cuda')
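# [Editor note] This last kernel fuses the whole tail of the loss: both
# per-sample divisions, the two means over the batch of 4, the -1 scaling,
# total_loss = loss_bg + loss_fg, and the isnan-count == 0 flag that backs
# the eager module's final assert.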
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax, probs], Original ATen: [aten._softmax, aten.clamp]
triton_poi_fused__softmax_clamp_1.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sum_3, probs_1, input_log_prob], Original ATen: [aten.sum, aten.div, aten.log]
triton_poi_fused_div_log_sum_2.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf1
buf3 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf4 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [mul_2, loss_bg, sum_6, bg_count], Original ATen: [aten.mul, aten.sum, aten.add]
triton_per_fused_add_mul_sum_3.run(buf5, arg0_1, buf2, buf3, 4, 16, grid=grid(4), stream=stream0)
buf8 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf9 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [mul, loss_fg, sum_5, fg_count], Original ATen: [aten.mul, aten.sum, aten.add]
triton_per_fused_add_mul_sum_4.run(buf10, arg0_1, buf2, buf8, 4, 48, grid=grid(4), stream=stream0)
del arg0_1
del buf2
buf11 = empty_strided_cuda((), (), torch.float32)
buf6 = empty_strided_cuda((), (), torch.float32)
buf7 = buf6; del buf6 # reuse
buf12 = buf11; del buf11 # reuse
buf13 = empty_strided_cuda((), (), torch.float32)
buf14 = empty_strided_cuda((), (), torch.bool)
# Topologically Sorted Source Nodes: [truediv_2, mean_1, loss_bg_1, truediv_1, mean, loss_fg_1, total_loss, isnan_2, sum_9, eq_3], Original ATen: [aten.div, aten.mean, aten.mul, aten.add, aten.isnan, aten.sum, aten.eq]
triton_per_fused_add_div_eq_isnan_mean_mul_sum_5.run(buf7, buf12, buf8, buf10, buf3, buf5, buf13, buf14, 1, 4, grid=grid(1), stream=stream0)
del buf3
del buf8
return (buf13, buf7, buf12, buf5, buf10, buf14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn import functional as F
class Seedloss(nn.Module):
def __init__(self, ignore_label=21):
super(Seedloss, self).__init__()
self.ignore_label = ignore_label
self.eps = 1e-05
def my_softmax(self, score, dim=1):
probs = torch.clamp(F.softmax(score, dim), self.eps, 1)
probs = probs / torch.sum(probs, dim=dim, keepdim=True)
return probs
def forward(self, predict, target):
"""
compute balanced seed loss
:param predict: (n, c, h, w)
:param target: (n, c, h, w)
:return: scalar total loss (balanced background + foreground seed loss)
"""
assert not target.requires_grad
target = target
assert torch.sum(torch.isinf(predict)) == 0
assert torch.sum(torch.isnan(predict)) == 0
input_log_prob = torch.log(self.my_softmax(predict, dim=1))
assert torch.sum(torch.isnan(input_log_prob)) == 0
fg_prob = input_log_prob[:, 1:, :, :]
fg_label = target[:, 1:, :, :]
fg_count = torch.sum(fg_label, dim=(1, 2, 3)) + self.eps
bg_prob = input_log_prob[:, 0:1, :, :]
bg_label = target[:, 0:1, :, :]
bg_count = torch.sum(bg_label, dim=(1, 2, 3)) + self.eps
loss_fg = torch.sum(fg_label * fg_prob, dim=(1, 2, 3))
loss_fg = -1 * torch.mean(loss_fg / fg_count)
loss_bg = torch.sum(bg_label * bg_prob, dim=(1, 2, 3))
loss_bg = -1 * torch.mean(loss_bg / bg_count)
total_loss = loss_bg + loss_fg
assert torch.sum(torch.isnan(total_loss)
) == 0, 'fg_loss: {} fg_count: {} bg_loss: {} bg_count: {}'.format(
loss_fg, fg_count, loss_bg, bg_count)
return total_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
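A minimal eager-mode usage sketch (editor addition): `Seedloss` expects a one-hot style seed map in `target`, with channel 0 as background and channels 1 onward as foreground, matching the slicing in `forward`:

```python
import torch
import torch.nn.functional as F

loss_fn = Seedloss()
predict = torch.rand(4, 4, 4, 4)                 # (n, c, h, w) scores
labels = torch.randint(0, 4, (4, 4, 4))          # per-pixel class ids
target = F.one_hot(labels, num_classes=4).permute(0, 3, 1, 2).float()
print(loss_fn(predict, target))                  # scalar total loss
```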
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_clamp_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 1e-05
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp11 = 1.0
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_div_log_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = tl_math.log(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_per_fused_add_mul_sum_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = 1e-05
tmp12 = tmp10 + tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp12, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_per_fused_add_mul_sum_4(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 48
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (16 + r1 + 64 * x0), rmask & xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(rmask & xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = 1e-05
tmp12 = tmp10 + tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp12, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_per_fused_add_div_eq_isnan_mean_mul_sum_5(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp7 = tl.load(in_ptr3 + r0, None)
tmp2 = tmp0 / tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp8 = tmp6 / tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = -1.0
tmp15 = tmp13 * tmp14
tmp16 = tmp5 / tmp12
tmp17 = tmp16 * tmp14
tmp18 = tmp15 + tmp17
tmp19 = libdevice.isnan(tmp18).to(tl.int1)
tmp20 = tmp19.to(tl.int64)
tmp21 = tl.full([1, 1], 0, tl.int64)
tmp22 = tmp20 == tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None)
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp18, None)
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_clamp_1[grid(256)](buf0, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused_div_log_sum_2[grid(256)](buf1, buf2, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf1
buf3 = empty_strided_cuda((4,), (1,), torch.float32)
buf4 = empty_strided_cuda((4,), (1,), torch.float32)
buf5 = buf4
del buf4
triton_per_fused_add_mul_sum_3[grid(4)](buf5, arg0_1, buf2, buf3, 4,
16, XBLOCK=1, num_warps=2, num_stages=1)
buf8 = empty_strided_cuda((4,), (1,), torch.float32)
buf9 = empty_strided_cuda((4,), (1,), torch.float32)
buf10 = buf9
del buf9
triton_per_fused_add_mul_sum_4[grid(4)](buf10, arg0_1, buf2, buf8,
4, 48, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del buf2
buf11 = empty_strided_cuda((), (), torch.float32)
buf6 = empty_strided_cuda((), (), torch.float32)
buf7 = buf6
del buf6
buf12 = buf11
del buf11
buf13 = empty_strided_cuda((), (), torch.float32)
buf14 = empty_strided_cuda((), (), torch.bool)
triton_per_fused_add_div_eq_isnan_mean_mul_sum_5[grid(1)](buf7,
buf12, buf8, buf10, buf3, buf5, buf13, buf14, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
del buf3
del buf8
return buf13, buf7, buf12, buf5, buf10, buf14
class SeedlossNew(nn.Module):
def __init__(self, ignore_label=21):
super(SeedlossNew, self).__init__()
self.ignore_label = ignore_label
self.eps = 1e-05
def my_softmax(self, score, dim=1):
probs = torch.clamp(F.softmax(score, dim), self.eps, 1)
probs = probs / torch.sum(probs, dim=dim, keepdim=True)
return probs
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
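A smoke test for the compiled wrapper (editor sketch; requires CUDA). Note that unlike the eager module, `SeedlossNew` skips the Python-side isinf/isnan asserts: the NaN flag is computed into `buf14` but dropped by `forward`:

```python
import torch

loss_fn = SeedlossNew()
a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
loss = loss_fn(a, b)
print(loss.item())  # 0-dim CUDA tensor holding the total loss
```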
| yaoqi-zd/SGAN | Seedloss | false | 16,771 | ["MIT"] | 48 | 43d8a859b03967e2423a73ef1ba332ee71714ba4 | https://github.com/yaoqi-zd/SGAN/tree/43d8a859b03967e2423a73ef1ba332ee71714ba4 |
PrimaryCaps | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ik/cikwp3af5pjsypo2wrobdh3ptoj7qfddlg24vmarkhoiio3wrbgn.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 128
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (131072*y1)), tmp0, ymask)
''', device_str='cuda')
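# [Editor note] This kernel just materializes a channels-last copy of the
# NCHW input; the output strides (131072, 1, 2048, 32) are exactly those of
# x.contiguous(memory_format=torch.channels_last) for a (4, 32, 64, 64) tensor.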
# kernel path: runs/run_shard_0/inductor_cache/e3/ce372j34spt6n6c73ychhfy2m6umubchn3rihx4ayqwhd2kzp33y.py
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# a => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2t/c2tugev72iiutoyluzyegaw24yp42ecqfaf34heczmmn66qg4w7y.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution, %sigmoid], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16777216],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8912896
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 544
x0 = xindex % 4096
x2 = (xindex // 2228224)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 512, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((512*x0) + (2097152*x2) + x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 544, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + ((32*x0) + (131072*x2) + ((-512) + x1)), tmp10, eviction_policy='evict_last', other=0.0)
tmp14 = tl.sigmoid(tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp10, tmp14, tmp15)
tmp17 = tl.where(tmp4, tmp9, tmp16)
tl.store(out_ptr0 + (x3), tmp17, None)
''', device_str='cuda')
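# For reference, the cat kernel above fuses the pose bias add, the sigmoid on
# the activations, and the channel concatenation; an eager-mode sketch would
# be `out = torch.cat([p + pose_bias.view(1, -1, 1, 1), torch.sigmoid(a)],
# dim=1)` (illustrative names, not generated code).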
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (512, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_2, (512, ), (1, ))
assert_size_stride(primals_3, (4, 32, 64, 64), (131072, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_5, (32, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 32, 64, 64), (131072, 1, 2048, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_3, buf0, 128, 4096, grid=grid(128, 4096), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [p], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 512, 64, 64), (2097152, 1, 32768, 512))
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 1, 2048, 32))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf3, primals_5, 524288, grid=grid(524288), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 544, 64, 64), (2228224, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf1, primals_2, buf3, buf4, 8912896, grid=grid(8912896), stream=stream0)
del buf1
del primals_2
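    # The (4, 544, 64, 64) concat buffer is returned reinterpreted with
    # permuted strides, realizing `out.permute(0, 2, 3, 1)` without a copy.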
return (reinterpret_tensor(buf4, (4, 64, 64, 544), (2228224, 64, 1, 4096), 0), primals_1, buf0, primals_4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((512, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 32, 64, 64), (131072, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class PrimaryCaps(nn.Module):
"""Creates a primary convolutional capsule layer
that outputs a pose matrix and an activation.
Note that for computation convenience, pose matrix
are stored in first part while the activations are
stored in the second part.
Args:
A: output of the normal conv layer
B: number of types of capsules
K: kernel size of convolution
P: size of pose matrix is P*P
stride: stride of convolution
Shape:
input: (*, A, h, w)
output: (*, h', w', B*(P*P+1))
h', w' is computed the same way as convolution layer
parameter size is: K*K*A*B*P*P + B*P*P
"""
def __init__(self, A=32, B=32, K=1, P=4, stride=1):
super(PrimaryCaps, self).__init__()
self.pose = nn.Conv2d(in_channels=A, out_channels=B * P * P,
kernel_size=K, stride=stride, bias=True)
self.a = nn.Conv2d(in_channels=A, out_channels=B, kernel_size=K,
stride=stride, bias=True)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
p = self.pose(x)
a = self.a(x)
a = self.sigmoid(a)
out = torch.cat([p, a], dim=1)
out = out.permute(0, 2, 3, 1)
return out
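# A minimal usage sketch (illustrative helper, not part of the original
# module; the shapes follow the docstring above).
def demo_primary_caps():
    layer = PrimaryCaps(A=32, B=32, K=1, P=4, stride=1)
    x = torch.rand(4, 32, 64, 64)  # (*, A, h, w)
    out = layer(x)
    # With K=1 and stride=1, h' == h and w' == w, and the last dimension
    # holds B*(P*P+1) = 32*17 = 544 values per spatial location.
    assert out.shape == (4, 64, 64, 544)
    return out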
def get_inputs():
return [torch.rand([4, 32, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel,
                       YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 128
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 131072 * y1), tmp0, ymask)
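# triton_poi_fused_0 repacks the NCHW input into a channels-last layout
# (strides (131072, 1, 2048, 32)) so the 1x1 convolutions read contiguous
# channel vectors.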
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel,
                                   XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 544
x0 = xindex % 4096
x2 = xindex // 2228224
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 512, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (512 * x0 + 2097152 * x2 + x1), tmp4,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 544, tl.int64)
tmp13 = tl.load(in_ptr2 + (32 * x0 + 131072 * x2 + (-512 + x1)), tmp10,
eviction_policy='evict_last', other=0.0)
tmp14 = tl.sigmoid(tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp10, tmp14, tmp15)
tmp17 = tl.where(tmp4, tmp9, tmp16)
tl.store(out_ptr0 + x3, tmp17, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (512, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_2, (512,), (1,))
assert_size_stride(primals_3, (4, 32, 64, 64), (131072, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_5, (32,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 32, 64, 64), (131072, 1, 2048, 32),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(128, 4096)](primals_3, buf0, 128, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 512, 64, 64), (2097152, 1, 32768, 512))
buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 1, 2048, 32))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(524288)](buf3, primals_5,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 544, 64, 64), (2228224, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_2[grid(8912896)](buf1, primals_2, buf3, buf4,
8912896, XBLOCK=512, num_warps=8, num_stages=1)
del buf1
del primals_2
    return (reinterpret_tensor(buf4, (4, 64, 64, 544), (2228224, 64, 1, 4096), 0),
        primals_1, buf0, primals_4, buf3)
class PrimaryCapsNew(nn.Module):
"""Creates a primary convolutional capsule layer
that outputs a pose matrix and an activation.
Note that for computation convenience, pose matrix
are stored in first part while the activations are
stored in the second part.
Args:
A: output of the normal conv layer
B: number of types of capsules
K: kernel size of convolution
P: size of pose matrix is P*P
stride: stride of convolution
Shape:
input: (*, A, h, w)
output: (*, h', w', B*(P*P+1))
h', w' is computed the same way as convolution layer
parameter size is: K*K*A*B*P*P + B*P*P
"""
def __init__(self, A=32, B=32, K=1, P=4, stride=1):
super(PrimaryCapsNew, self).__init__()
self.pose = nn.Conv2d(in_channels=A, out_channels=B * P * P,
kernel_size=K, stride=stride, bias=True)
self.a = nn.Conv2d(in_channels=A, out_channels=B, kernel_size=K,
stride=stride, bias=True)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.pose.weight
primals_2 = self.pose.bias
primals_4 = self.a.weight
primals_5 = self.a.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| yl-1993/Matrix-Capsules-EM-PyTorch | PrimaryCaps | false | 16,772 | [
"MIT"
] | 97 | ca4cd7f45a4234ddf49efe9db34c9ff645378437 | https://github.com/yl-1993/Matrix-Capsules-EM-PyTorch/tree/ca4cd7f45a4234ddf49efe9db34c9ff645378437 |
MaskedMHA | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3c/c3czidi7xshj6mlkdh55bbdzoazrux4pkcvyxgledvgfi5mpq3bi.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_1, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
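# With n_embd == n_head == 4 each head has a single channel, so the attention
# scale 1/sqrt(n_channels) reduces to the constant 1.0 multiplied above.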
# kernel path: runs/run_shard_0/inductor_cache/au/cau4pihcaptiev5y2ewn2o2nvrwhk7hogc72cofmmtbyv4rxc2oy.py
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# k => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qz/cqzvghsbyysjkzaicjx2n4wnvluudyg2qgob34ksgcziksqsiwlp.py
# Topologically Sorted Source Nodes: [logical_not, att_1, att_2], Original ATen: [aten.logical_not, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# att_1 => full_default, where
# att_2 => amax, exp, sub, sum_1
# logical_not => logical_not
# Graph fragment:
# %logical_not : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%unsqueeze,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not, %full_default, %view_5), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_logical_not_masked_fill_2 = async_compile.triton('triton_poi_fused__softmax_logical_not_masked_fill_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_logical_not_masked_fill_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_logical_not_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp1 = (tmp0 != 0)
tmp2 = tmp1 == 0
tmp4 = float("-inf")
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = (tmp6 != 0)
tmp8 = tmp7 == 0
tmp10 = tl.where(tmp8, tmp4, tmp9)
tmp11 = triton_helpers.maximum(tmp5, tmp10)
tmp13 = (tmp12 != 0)
tmp14 = tmp13 == 0
tmp16 = tl.where(tmp14, tmp4, tmp15)
tmp17 = triton_helpers.maximum(tmp11, tmp16)
tmp19 = (tmp18 != 0)
tmp20 = tmp19 == 0
tmp22 = tl.where(tmp20, tmp4, tmp21)
tmp23 = triton_helpers.maximum(tmp17, tmp22)
tmp24 = tmp5 - tmp23
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp10 - tmp23
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp16 - tmp23
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tmp32 = tmp22 - tmp23
tmp33 = tl_math.exp(tmp32)
tmp34 = tmp31 + tmp33
tl.store(out_ptr0 + (x2), tmp23, xmask)
tl.store(out_ptr1 + (x2), tmp34, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/d6/cd6r5t2bhtt5hdojxb5kfjzejpbcgyu57yawctvet3nktctlmawg.py
# Topologically Sorted Source Nodes: [logical_not, att_1, att_2], Original ATen: [aten.logical_not, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# att_1 => full_default, where
# att_2 => amax, div, exp, sub
# logical_not => logical_not
# Graph fragment:
# %logical_not : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%unsqueeze,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not, %full_default, %view_5), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_logical_not_masked_fill_3 = async_compile.triton('triton_poi_fused__softmax_logical_not_masked_fill_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_logical_not_masked_fill_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_logical_not_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x3 = xindex
x4 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr0 + (x3), xmask)
tmp6 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp1 = (tmp0 != 0)
tmp2 = tmp1 == 0
tmp4 = float("-inf")
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
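# For reference, the two kernels above implement a numerically stable masked
# softmax in two passes: pass 1 computes the row-wise max and the sum of
# exponentials, pass 2 normalizes in place. An eager-mode sketch of the same
# computation (illustrative only):
#   att = att.masked_fill(torch.logical_not(mask[:, :, None, :]), float('-inf'))
#   att = torch.softmax(att, dim=-1)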
# kernel path: runs/run_shard_0/inductor_cache/ow/cowh5pyvgisqqf2a5whywebi6iyl2bo32h5th7jr5giaj3lxh2rl.py
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul_1 => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_2, %unsqueeze_1), kwargs = {})
triton_poi_fused_mul_4 = async_compile.triton('triton_poi_fused_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
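# For reference, this kernel fuses a bias add with an elementwise mask
# multiply, i.e. `(x + bias) * mask`; the wrapper below reuses it both for
# `v * mask` and for masking the final projection output.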
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_10, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_1, primals_6, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf3, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
buf4 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf4, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [att], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [logical_not, att_1, att_2], Original ATen: [aten.logical_not, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_logical_not_masked_fill_2.run(primals_8, buf5, buf6, buf7, 64, grid=grid(64), stream=stream0)
buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [logical_not, att_1, att_2], Original ATen: [aten.logical_not, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_logical_not_masked_fill_3.run(buf8, primals_8, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf6
buf9 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
triton_poi_fused_mul_4.run(buf9, primals_7, primals_8, 64, grid=grid(64), stream=stream0)
del primals_7
buf10 = reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
# Topologically Sorted Source Nodes: [conv1d_3], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), primals_9, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 4), (16, 4, 1))
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [conv1d_3, out_2], Original ATen: [aten.convolution, aten.mul]
triton_poi_fused_mul_4.run(buf12, primals_10, primals_8, 64, grid=grid(64), stream=stream0)
del primals_10
return (buf12, primals_1, primals_2, primals_4, primals_6, primals_8, primals_9, buf8, reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.utils.data
from torch.nn import functional as F
class MaskedMHA(nn.Module):
"""
Multi Head Attention with mask
Modified from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
def __init__(self, n_embd, n_head, attn_pdrop=0.0, proj_pdrop=0.0):
super().__init__()
assert n_embd % n_head == 0
self.n_embd = n_embd
self.n_head = n_head
self.n_channels = n_embd // n_head
self.scale = 1.0 / math.sqrt(self.n_channels)
self.key = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.query = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.value = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.attn_drop = nn.Dropout(attn_pdrop)
self.proj_drop = nn.Dropout(proj_pdrop)
self.proj = nn.Conv1d(self.n_embd, self.n_embd, 1)
def forward(self, x, mask):
B, C, _T = x.size()
k = self.key(x)
q = self.query(x)
v = self.value(x)
k = k.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
q = q.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
v = v.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
att = q * self.scale @ k.transpose(-2, -1)
        att = att.masked_fill(torch.logical_not(mask[:, :, None, :]),
            float('-inf'))
att = F.softmax(att, dim=-1)
att = self.attn_drop(att)
out = att @ (v * mask[:, :, :, None].float())
out = out.transpose(2, 3).contiguous().view(B, C, -1)
out = self.proj_drop(self.proj(out)) * mask.float()
return out, mask
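# A minimal usage sketch (illustrative helper, not part of the original
# module). The mask is shaped to broadcast against the per-head attention
# matrix; nonzero entries mean "keep".
def demo_masked_mha():
    mha = MaskedMHA(n_embd=4, n_head=4)
    x = torch.rand(4, 4, 4)  # (B, C, T)
    mask = torch.ones(4, 4, 4)  # all positions visible
    out, out_mask = mha(x, mask)
    assert out.shape == x.shape
    return out, out_mask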
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_embd': 4, 'n_head': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel,
                                   XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_logical_not_masked_fill_2(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
tmp1 = tmp0 != 0
tmp2 = tmp1 == 0
tmp4 = float('-inf')
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp6 != 0
tmp8 = tmp7 == 0
tmp10 = tl.where(tmp8, tmp4, tmp9)
tmp11 = triton_helpers.maximum(tmp5, tmp10)
tmp13 = tmp12 != 0
tmp14 = tmp13 == 0
tmp16 = tl.where(tmp14, tmp4, tmp15)
tmp17 = triton_helpers.maximum(tmp11, tmp16)
tmp19 = tmp18 != 0
tmp20 = tmp19 == 0
tmp22 = tl.where(tmp20, tmp4, tmp21)
tmp23 = triton_helpers.maximum(tmp17, tmp22)
tmp24 = tmp5 - tmp23
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp10 - tmp23
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp16 - tmp23
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tmp32 = tmp22 - tmp23
tmp33 = tl_math.exp(tmp32)
tmp34 = tmp31 + tmp33
tl.store(out_ptr0 + x2, tmp23, xmask)
tl.store(out_ptr1 + x2, tmp34, xmask)
@triton.jit
def triton_poi_fused__softmax_logical_not_masked_fill_3(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
x4 = xindex // 4
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr0 + x3, xmask)
tmp6 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp1 = tmp0 != 0
tmp2 = tmp1 == 0
tmp4 = float('-inf')
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_mul_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = extern_kernels.convolution(primals_1, primals_6, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
get_raw_stream(0)
triton_poi_fused_mul_0[grid(64)](buf3, primals_5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
buf4 = buf0
del buf0
triton_poi_fused_convolution_1[grid(64)](buf4, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        triton_poi_fused__softmax_logical_not_masked_fill_2[grid(64)](
            primals_8, buf5, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_logical_not_masked_fill_3[grid(256)](buf8,
primals_8, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf6
buf9 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf2
triton_poi_fused_mul_4[grid(64)](buf9, primals_7, primals_8, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_7
buf10 = reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 1), 0)
del buf7
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = extern_kernels.convolution(reinterpret_tensor(buf10, (4, 4,
4), (16, 4, 1), 0), primals_9, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf11, (4, 4, 4), (16, 4, 1))
buf12 = buf11
del buf11
triton_poi_fused_mul_4[grid(64)](buf12, primals_10, primals_8, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_10
return (buf12, primals_1, primals_2, primals_4, primals_6, primals_8,
primals_9, buf8, reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0))
class MaskedMHANew(nn.Module):
"""
Multi Head Attention with mask
Modified from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
def __init__(self, n_embd, n_head, attn_pdrop=0.0, proj_pdrop=0.0):
super().__init__()
assert n_embd % n_head == 0
self.n_embd = n_embd
self.n_head = n_head
self.n_channels = n_embd // n_head
self.scale = 1.0 / math.sqrt(self.n_channels)
self.key = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.query = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.value = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.attn_drop = nn.Dropout(attn_pdrop)
self.proj_drop = nn.Dropout(proj_pdrop)
self.proj = nn.Conv1d(self.n_embd, self.n_embd, 1)
def forward(self, input_0, input_1):
primals_2 = self.key.weight
primals_3 = self.key.bias
primals_4 = self.query.weight
primals_5 = self.query.bias
primals_6 = self.value.weight
primals_7 = self.value.bias
primals_9 = self.proj.weight
primals_10 = self.proj.bias
primals_1 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0], output[1]
| yjh0410/actionformer_release | MaskedMHA | false | 16,773 | [
"MIT"
] | 61 | 7a97422111d3e29c8d2e14088c850c6975855ea7 | https://github.com/yjh0410/actionformer_release/tree/7a97422111d3e29c8d2e14088c850c6975855ea7 |
FocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py
# Topologically Sorted Source Nodes: [CE], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# CE => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/j6/cj6dxslhpgoy655zvfmv4qaodji627aw7644wxbpueyfrkcumn6e.py
# Topologically Sorted Source Nodes: [CE, neg, p, sub, pow_1, loss, sum_1, truediv], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.exp, aten.rsub, aten.pow, aten.div]
# Source node to ATen node mapping:
# CE => exp, log, mul, neg, sub_1, sum_1, sum_2
# loss => mul_1
# neg => neg_1
# p => exp_1
# pow_1 => pow_1
# sub => sub_2
# sum_1 => sum_3
# truediv => div
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %neg : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%neg,), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %exp_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 1.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %neg), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 4), kwargs = {})
triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp13 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp16 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp20 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp24 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tmp2 - tmp11
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp5 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp8 - tmp11
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tmp28 = -tmp27
tmp29 = tl_math.exp(tmp28)
tmp30 = 1.0
tmp31 = tmp30 - tmp29
tmp32 = tmp31 * tmp27
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = tl.sum(tmp33, 1)[:, None]
tmp36 = 0.25
tmp37 = tmp35 * tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp37, None)
''', device_str='cuda')
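# For reference, the fused reduction above corresponds to this eager-mode
# sketch (illustrative only; gamma is folded in as 1.0 and the divisor is the
# batch size 4):
#   ce = -(torch.log_softmax(x, dim=1) * target).sum(dim=1)
#   loss = ((1 - torch.exp(-ce)) * ce).sum() / 4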
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [CE], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [CE, neg, p, sub, pow_1, loss, sum_1, truediv], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.exp, aten.rsub, aten.pow, aten.div]
triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1.run(buf3, buf0, arg0_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del buf0
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.utils.data
import torch.nn as nn
from torch.nn import functional as F
class FocalLoss(nn.Module):
def __init__(self, weight=None, gamma=1.0, num_classes=80):
super(FocalLoss, self).__init__()
assert gamma >= 0
self.gamma = gamma
self.weight = weight
self.num_classes = num_classes
prior = np.array([0.119217, 0.15927, 0.570566, 0.1045, 0.04089,
0.005522])
self.prior = torch.tensor(prior).float()
self.weight_b = torch.from_numpy(np.array([1.11, 1.06, 1.01, 1.16,
1.84, 10.0, 1.0])).float()
def forward(self, input, target):
CE = F.cross_entropy(input, target, reduction='none')
p = torch.exp(-CE)
loss = (1 - p) ** self.gamma * CE
return loss.sum() / CE.shape[0]
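# A minimal usage sketch (illustrative helper, not part of the original
# module). Soft class-probability targets are passed here, which
# F.cross_entropy accepts in PyTorch 1.10+ (an assumption about the runtime).
def demo_focal_loss():
    criterion = FocalLoss(gamma=1.0)
    logits = torch.rand(4, 4, 4, 4)  # (N, C, H, W) raw scores
    target = torch.softmax(torch.rand(4, 4, 4, 4), dim=1)  # per-pixel class probs
    loss = criterion(logits, target)
    assert loss.dim() == 0  # scalar: focal-weighted CE summed, divided by N
    return loss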
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel,
                                    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp13 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp20 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp24 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tmp2 - tmp11
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp5 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp8 - tmp11
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tmp28 = -tmp27
tmp29 = tl_math.exp(tmp28)
tmp30 = 1.0
tmp31 = tmp30 - tmp29
tmp32 = tmp31 * tmp27
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = tl.sum(tmp33, 1)[:, None]
tmp36 = 0.25
tmp37 = tmp35 * tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp37, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1[grid(1)](
buf3, buf0, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf3,
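# Pipeline note: kernel 0 materializes the max-shifted logits into buf0;
# kernel 1 consumes buf0 together with arg0_1 and reduces the whole batch
# to the scalar focal loss returned in buf3.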
class FocalLossNew(nn.Module):
def __init__(self, weight=None, gamma=1.0, num_classes=80):
super(FocalLossNew, self).__init__()
assert gamma >= 0
self.gamma = gamma
self.weight = weight
self.num_classes = num_classes
prior = np.array([0.119217, 0.15927, 0.570566, 0.1045, 0.04089,
0.005522])
self.prior = torch.tensor(prior).float()
self.weight_b = torch.from_numpy(np.array([1.11, 1.06, 1.01, 1.16,
1.84, 10.0, 1.0])).float()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
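# Usage sketch (assumes a CUDA device, since call() pins device 0):
# m = FocalLossNew()
# loss = m(torch.rand(4, 4, 4, 4, device='cuda'),
#          torch.rand(4, 4, 4, 4, device='cuda'))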
| yulonghui/yingying_boss | FocalLoss | false | 16774 | ["MIT"] | 306 | f9cf956cb6507ef43f8005c61027f6b54f418224 | https://github.com/yulonghui/yingying_boss/tree/f9cf956cb6507ef43f8005c61027f6b54f418224 |
GCT | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/vt/cvtlwsahn5oxfhga3dwjdswt3bhkvbh4zkaufpwgeopelblrwpnw.py
# Topologically Sorted Source Nodes: [pow_1, sum_1, add, pow_2], Original ATen: [aten.pow, aten.sum, aten.add]
# Source node to ATen node mapping:
# add => add
# pow_1 => pow_1
# pow_2 => pow_2
# sum_1 => sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2, 3], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-05), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.5), kwargs = {})
triton_per_fused_add_pow_sum_0 = async_compile.triton('triton_per_fused_add_pow_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_pow_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_pow_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.sqrt(tmp7)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/bk/cbk3oabqcrqeiz2rnrvk4rxkdmk4ef46ye5mdaspijuw6pzegair.py
# Topologically Sorted Source Nodes: [embedding, pow_3, mean, add_1, pow_4], Original ATen: [aten.mul, aten.pow, aten.mean, aten.add]
# Source node to ATen node mapping:
# add_1 => add_1
# embedding => mul
# mean => mean
# pow_3 => pow_3
# pow_4 => pow_4
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, %primals_2), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_3, [1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-05), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_1, 0.5), kwargs = {})
triton_poi_fused_add_mean_mul_pow_1 = async_compile.triton('triton_poi_fused_add_mean_mul_pow_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_pow_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_mul_pow_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (1))
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp11 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2))
tmp13 = tl.broadcast_to(tmp12, [XBLOCK])
tmp17 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (3))
tmp19 = tl.broadcast_to(tmp18, [XBLOCK])
tmp3 = tmp0 * tmp2
tmp4 = tmp3 * tmp3
tmp8 = tmp5 * tmp7
tmp9 = tmp8 * tmp8
tmp10 = tmp4 + tmp9
tmp14 = tmp11 * tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp10 + tmp15
tmp20 = tmp17 * tmp19
tmp21 = tmp20 * tmp20
tmp22 = tmp16 + tmp21
tmp23 = 4.0
tmp24 = tmp22 / tmp23
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tl.store(out_ptr0 + (x0), tmp27, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ni/cnixk7pwsdlgsjxwsmpbrzopu3rzfg26mvtp5cemfxo35lnktysi.py
# Topologically Sorted Source Nodes: [embedding, pow_3, mean, add_1, pow_4, norm, mul_1, add_2, tanh, gate], Original ATen: [aten.mul, aten.pow, aten.mean, aten.add, aten.div, aten.tanh]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# embedding => mul
# gate => add_3
# mean => mean
# mul_1 => mul_1
# norm => div
# pow_3 => pow_3
# pow_4 => pow_4
# tanh => tanh
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, %primals_2), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_3, [1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-05), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_3, %pow_4), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %div), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_4), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1.0), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_tanh_2 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_tanh_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 / tmp4
tmp6 = tmp2 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = libdevice.tanh(tmp8)
tmp10 = 1.0
tmp11 = tmp9 + tmp10
tl.store(out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vz/cvzd7uxhjcuuvhocpajasfl7npgg3w4tjtzribzneczbe5yv6xr6.py
# Topologically Sorted Source Nodes: [embedding, pow_3, mean, add_1, pow_4, norm, mul_1, add_2, tanh, gate, mul_2], Original ATen: [aten.mul, aten.pow, aten.mean, aten.add, aten.div, aten.tanh]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# embedding => mul
# gate => add_3
# mean => mean
# mul_1 => mul_1
# mul_2 => mul_2
# norm => div
# pow_3 => pow_3
# pow_4 => pow_4
# tanh => tanh
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, %primals_2), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_3, [1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-05), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_3, %pow_4), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %div), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_4), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1.0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %add_3), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_tanh_3 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_tanh_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_tanh_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_tanh_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [pow_1, sum_1, add, pow_2], Original ATen: [aten.pow, aten.sum, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused_add_pow_sum_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [embedding, pow_3, mean, add_1, pow_4], Original ATen: [aten.mul, aten.pow, aten.mean, aten.add]
triton_poi_fused_add_mean_mul_pow_1.run(buf1, primals_2, buf2, 4, grid=grid(4), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [embedding, pow_3, mean, add_1, pow_4, norm, mul_1, add_2, tanh, gate], Original ATen: [aten.mul, aten.pow, aten.mean, aten.add, aten.div, aten.tanh]
triton_poi_fused_add_div_mean_mul_pow_tanh_2.run(buf1, primals_2, primals_3, buf2, primals_4, buf3, 16, grid=grid(16), stream=stream0)
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [embedding, pow_3, mean, add_1, pow_4, norm, mul_1, add_2, tanh, gate, mul_2], Original ATen: [aten.mul, aten.pow, aten.mean, aten.add, aten.div, aten.tanh]
triton_poi_fused_add_div_mean_mul_pow_tanh_3.run(primals_1, buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf3
return (buf4, primals_1, primals_2, primals_3, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class GCT(nn.Module):
def __init__(self, num_channels, epsilon=1e-05, mode='l2', after_relu=False
):
super(GCT, self).__init__()
self.alpha = nn.Parameter(torch.ones(1, num_channels, 1, 1))
self.gamma = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
self.beta = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
self.epsilon = epsilon
self.mode = mode
self.after_relu = after_relu
def forward(self, x):
if self.mode == 'l2':
embedding = (x.pow(2).sum((2, 3), keepdim=True) + self.epsilon
).pow(0.5) * self.alpha
norm = self.gamma / (embedding.pow(2).mean(dim=1, keepdim=True) +
self.epsilon).pow(0.5)
elif self.mode == 'l1':
if not self.after_relu:
_x = torch.abs(x)
else:
_x = x
embedding = _x.sum((2, 3), keepdim=True) * self.alpha
norm = self.gamma / (torch.abs(embedding).mean(dim=1, keepdim=
True) + self.epsilon)
else:
            raise ValueError('Unknown GCT mode: {}'.format(self.mode))
gate = 1.0 + torch.tanh(embedding * norm + self.beta)
return x * gate
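# Shape walk-through for the l2 path (using the (4, 4, 4, 4) input from
# get_inputs() below): embedding is (N, C, 1, 1), norm is (N, 1, 1, 1), and
# the gate broadcasts back to (N, C, H, W), so GCT acts as a per-channel,
# input-dependent rescaling of x.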
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_pow_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
    # Per-(batch, channel) l2 embedding: sqrt(sum(x^2) over the 4x4 spatial
    # plane + 1e-5), one value for each of the 16 (N * C) rows, in place.
    xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.sqrt(tmp7)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_mean_mul_pow_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
    # Channel normalization term: sqrt(mean over the 4 channels of
    # (embedding * alpha)^2 + 1e-5), one scalar per batch element.
    xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + 1)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + 2)
tmp13 = tl.broadcast_to(tmp12, [XBLOCK])
tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + 3)
tmp19 = tl.broadcast_to(tmp18, [XBLOCK])
tmp3 = tmp0 * tmp2
tmp4 = tmp3 * tmp3
tmp8 = tmp5 * tmp7
tmp9 = tmp8 * tmp8
tmp10 = tmp4 + tmp9
tmp14 = tmp11 * tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp10 + tmp15
tmp20 = tmp17 * tmp19
tmp21 = tmp20 * tmp20
tmp22 = tmp16 + tmp21
tmp23 = 4.0
tmp24 = tmp22 / tmp23
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tl.store(out_ptr0 + x0, tmp27, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_tanh_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Gate: 1 + tanh((embedding * alpha) * (gamma / norm) + beta), one value
    # per (batch, channel) pair.
    xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 / tmp4
tmp6 = tmp2 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = libdevice.tanh(tmp8)
tmp10 = 1.0
tmp11 = tmp9 + tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_tanh_3(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
    # Apply the gate: out = x * gate, broadcasting the (N, C, 1, 1) gate
    # over the 4x4 spatial plane.
    xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_add_pow_sum_0[grid(16)](buf1, primals_1, 16, 16,
XBLOCK=1, num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_poi_fused_add_mean_mul_pow_1[grid(4)](buf1, primals_2, buf2,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_tanh_2[grid(16)](buf1,
primals_2, primals_3, buf2, primals_4, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_tanh_3[grid(256)](primals_1,
buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
return buf4, primals_1, primals_2, primals_3, primals_4, buf1
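# Buffer roles: buf1 holds the per-channel l2 embeddings, buf2 the
# per-batch channel norms, buf3 the gates, and buf4 the gated output that
# is returned in the first position.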
class GCTNew(nn.Module):
def __init__(self, num_channels, epsilon=1e-05, mode='l2', after_relu=False
):
super(GCTNew, self).__init__()
self.alpha = nn.Parameter(torch.ones(1, num_channels, 1, 1))
self.gamma = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
self.beta = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
self.epsilon = epsilon
self.mode = mode
self.after_relu = after_relu
def forward(self, input_0):
primals_2 = self.alpha
primals_3 = self.gamma
primals_4 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
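# Usage sketch (assumes CUDA; call() asserts the exact (4, 4, 4, 4) input
# shape and strides it was compiled for):
# m = GCTNew(num_channels=4).cuda()
# y = m(torch.rand(4, 4, 4, 4, device='cuda'))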
| yoxu515/CFBI | GCT | false | 16775 | ["BSD-3-Clause"] | 312 | 0bab1e3c9fc3e3ba0629f716d60221e8f8d9d586 | https://github.com/yoxu515/CFBI/tree/0bab1e3c9fc3e3ba0629f716d60221e8f8d9d586 |
MyEntLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/qv/cqvghag6kuz2rjb2v7nqnkqeqxa2duw4c7xdtud3fzmrw2bs6m6i.py
# Topologically Sorted Source Nodes: [x, sum_1], Original ATen: [aten._softmax, aten.sum]
# Source node to ATen node mapping:
# sum_1 => sum_2
# x => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%div, [1]), kwargs = {})
triton_red_fused__softmax_sum_0 = async_compile.triton('triton_red_fused__softmax_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[64, 128],
reduction_hint=ReductionHint.OUTER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__softmax_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__softmax_sum_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 64
rnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 16
x1 = (xindex // 16)
_tmp2 = tl.full([XBLOCK, RBLOCK], float("-inf"), tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (1280*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.maximum(_tmp2, tmp1)
_tmp2 = tl.where(rmask & xmask, tmp3, _tmp2)
tmp2 = triton_helpers.max2(_tmp2, 1)[:, None]
tl.store(out_ptr0 + (x3), tmp2, xmask)
_tmp8 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp4 = tl.load(in_ptr0 + (x0 + (16*r2) + (1280*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp5 = tmp4 - tmp2
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = _tmp8 + tmp7
_tmp8 = tl.where(rmask & xmask, tmp9, _tmp8)
tmp8 = tl.sum(_tmp8, 1)[:, None]
tl.store(out_ptr1 + (x3), tmp8, xmask)
_tmp15 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp10 = tl.load(in_ptr0 + (x0 + (16*r2) + (1280*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp12 / tmp8
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = _tmp15 + tmp14
_tmp15 = tl.where(rmask & xmask, tmp16, _tmp15)
tmp15 = tl.sum(_tmp15, 1)[:, None]
tl.store(out_ptr2 + (x3), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5w/c5wgqdac46inpbothtgzi32qrgebqx7nrd5saeuj5fwjxmecdhlc.py
# Topologically Sorted Source Nodes: [x, p, logp, mul, ent, entloss], Original ATen: [aten._softmax, aten.div, aten.log2, aten.mul, aten.neg, aten.sum]
# Source node to ATen node mapping:
# ent => neg
# entloss => sum_3
# logp => log2
# mul => mul
# p => div_1
# x => div, exp, sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div, %view), kwargs = {})
# %log2 : [num_users=1] = call_function[target=torch.ops.aten.log2.default](args = (%div_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %log2), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mul,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%neg, [1]), kwargs = {})
triton_red_fused__softmax_div_log2_mul_neg_sum_1 = async_compile.triton('triton_red_fused__softmax_div_log2_mul_neg_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[64, 128],
reduction_hint=ReductionHint.OUTER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__softmax_div_log2_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__softmax_div_log2_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 64
rnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x2 = (xindex // 16)
x4 = xindex % 16
x5 = xindex
tmp1 = tl.load(in_out_ptr0 + (x5), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
x1 = (xindex // 4) % 4
_tmp12 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp0 = tl.load(in_ptr0 + (x4 + (16*r3) + (1280*x2)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp6 = tl.load(in_ptr2 + (x1 + (4*(r3 // 20)) + (16*x2)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 / tmp4
tmp7 = tmp5 / tmp6
tmp8 = libdevice.log2(tmp7)
tmp9 = tmp7 * tmp8
tmp10 = -tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = _tmp12 + tmp11
_tmp12 = tl.where(rmask & xmask, tmp13, _tmp12)
tmp12 = tl.sum(_tmp12, 1)[:, None]
tl.store(in_out_ptr0 + (x5), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 80, 4, 4), (1280, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, sum_1], Original ATen: [aten._softmax, aten.sum]
stream0 = get_raw_stream(0)
triton_red_fused__softmax_sum_0.run(arg0_1, buf0, buf1, buf2, 64, 80, grid=grid(64), stream=stream0)
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x, p, logp, mul, ent, entloss], Original ATen: [aten._softmax, aten.div, aten.log2, aten.mul, aten.neg, aten.sum]
triton_red_fused__softmax_div_log2_mul_neg_sum_1.run(buf3, arg0_1, buf1, buf2, 64, 80, grid=grid(64), stream=stream0)
del arg0_1
del buf1
del buf2
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 80, 4, 4), (1280, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MyEntLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
x = torch.nn.Softmax(dim=1)(x)
p = x / torch.repeat_interleave(x.sum(dim=1).unsqueeze(-1), repeats
=20, dim=1)
logp = torch.log2(p)
ent = -torch.mul(p, logp)
entloss = torch.sum(ent, dim=1)
return entloss
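# Worked shapes for the get_inputs() case below: x is (4, 80, 4, 4);
# x.sum(dim=1).unsqueeze(-1) is (4, 4, 4, 1) and repeat_interleave(...,
# repeats=20, dim=1) expands it to (4, 80, 4, 1), so p broadcasts against x
# and entloss comes out with shape (4, 4, 4).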
def get_inputs():
return [torch.rand([4, 80, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_red_fused__softmax_sum_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2,
xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    # Streaming softmax over the 80-channel dim in three passes: running
    # max (out_ptr0), sum of exp(x - max) (out_ptr1), and the sum of the
    # resulting probabilities (out_ptr2), later used as the repeated
    # denominator for the entropy.
    xnumel = 64
rnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 16
x1 = xindex // 16
_tmp2 = tl.full([XBLOCK, RBLOCK], float('-inf'), tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 1280 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.maximum(_tmp2, tmp1)
_tmp2 = tl.where(rmask & xmask, tmp3, _tmp2)
tmp2 = triton_helpers.max2(_tmp2, 1)[:, None]
tl.store(out_ptr0 + x3, tmp2, xmask)
_tmp8 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp4 = tl.load(in_ptr0 + (x0 + 16 * r2 + 1280 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp5 = tmp4 - tmp2
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = _tmp8 + tmp7
_tmp8 = tl.where(rmask & xmask, tmp9, _tmp8)
tmp8 = tl.sum(_tmp8, 1)[:, None]
tl.store(out_ptr1 + x3, tmp8, xmask)
_tmp15 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp10 = tl.load(in_ptr0 + (x0 + 16 * r2 + 1280 * x1), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp12 / tmp8
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = _tmp15 + tmp14
_tmp15 = tl.where(rmask & xmask, tmp16, _tmp15)
tmp15 = tl.sum(_tmp15, 1)[:, None]
tl.store(out_ptr2 + x3, tmp15, xmask)
@triton.jit
def triton_red_fused__softmax_div_log2_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.
constexpr):
    # Entropy reduction: p = softmax(x) / repeated channel-sum, then
    # entloss = -sum(p * log2(p)) over the 80 channels, one value per
    # (batch, h, w) position.
    xnumel = 64
rnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x2 = xindex // 16
x4 = xindex % 16
x5 = xindex
tmp1 = tl.load(in_out_ptr0 + x5, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
x1 = xindex // 4 % 4
_tmp12 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp0 = tl.load(in_ptr0 + (x4 + 16 * r3 + 1280 * x2), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp6 = tl.load(in_ptr2 + (x1 + 4 * (r3 // 20) + 16 * x2), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 / tmp4
tmp7 = tmp5 / tmp6
tmp8 = libdevice.log2(tmp7)
tmp9 = tmp7 * tmp8
tmp10 = -tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = _tmp12 + tmp11
_tmp12 = tl.where(rmask & xmask, tmp13, _tmp12)
tmp12 = tl.sum(_tmp12, 1)[:, None]
tl.store(in_out_ptr0 + x5, tmp12, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 80, 4, 4), (1280, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_red_fused__softmax_sum_0[grid(64)](arg0_1, buf0, buf1, buf2,
64, 80, XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
triton_red_fused__softmax_div_log2_mul_neg_sum_1[grid(64)](buf3,
arg0_1, buf1, buf2, 64, 80, XBLOCK=64, RBLOCK=8, num_warps=4,
num_stages=1)
del arg0_1
del buf1
del buf2
return buf3,
class MyEntLossNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| yuantn/MI-AOD | MyEntLoss | false | 16776 | ["Apache-2.0"] | 188 | e57114d60f9ce5e43839cdf7068a90ee58092ec8 | https://github.com/yuantn/MI-AOD/tree/e57114d60f9ce5e43839cdf7068a90ee58092ec8 |
ActorNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ff/cffi7vxidma5gei4f6wznc3qzapljmsv5w6dvkcys2pj7dzl4a37.py
# Topologically Sorted Source Nodes: [phi_s], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# phi_s => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/q5/cq52p2qap7uob2ddnn4qeh67r3muutkp3yhbkqpu4eqaemol3idl.py
# Topologically Sorted Source Nodes: [prb_a], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# prb_a => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {})
triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (50, 4), (4, 1))
assert_size_stride(primals_2, (50, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 50), (50, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0); del buf0 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool)
# Topologically Sorted Source Nodes: [phi_s], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf4, 3200, grid=grid(3200), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 50), (50, 1), 0), reinterpret_tensor(primals_4, (50, 4), (1, 50), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [prb_a], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_1.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 50), (50, 1), 0), buf3, primals_4, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((50, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 50), (50, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as func
class ActorNetwork(torch.nn.Module):
def __init__(self, s_space, a_space):
super(ActorNetwork, self).__init__()
self.first_dense = torch.nn.Linear(s_space, 50)
self.second_dense = torch.nn.Linear(50, a_space)
def forward(self, s):
phi_s = func.relu(self.first_dense(s))
        prb_a = torch.sigmoid(self.second_dense(phi_s))
return prb_a
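# A minimal usage sketch (illustrative): the actor maps a state batch
# through a 50-unit ReLU layer to per-action probabilities in (0, 1).
# actor = ActorNetwork(s_space=4, a_space=4)
# prb_a = actor(torch.rand(4, 4, 4, 4))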
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'s_space': 4, 'a_space': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused bias-add + ReLU for the first dense layer; also emits the
    # (activation <= 0) mask consumed by threshold_backward in autograd.
    xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
    # Fused bias-add + sigmoid for the action-probability head.
    xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (50, 4), (4, 1))
assert_size_stride(primals_2, (50,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 50), (50, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(3200)](buf1,
primals_2, buf4, 3200, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 50), (50, 1), 0),
reinterpret_tensor(primals_4, (50, 4), (1, 50), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_sigmoid_1[grid(256)](buf3, primals_5, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_5
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 50), (50, 1), 0
), buf3, primals_4, buf4
class ActorNetworkNew(torch.nn.Module):
def __init__(self, s_space, a_space):
super(ActorNetworkNew, self).__init__()
self.first_dense = torch.nn.Linear(s_space, 50)
self.second_dense = torch.nn.Linear(50, a_space)
def forward(self, input_0):
primals_1 = self.first_dense.weight
primals_2 = self.first_dense.bias
primals_4 = self.second_dense.weight
primals_5 = self.second_dense.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| yutiansut/Personae | ActorNetwork | false | 16777 | ["MIT"] | 1046 | e5e89cbaaf2c4708952d25fdb25e99837aecdb4e | https://github.com/yutiansut/Personae/tree/e5e89cbaaf2c4708952d25fdb25e99837aecdb4e |
CriticNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ft/cfttpbzrl7pvuvaf4zviz3wpbepmlpbxxkgmmnpx4im7vcpcp77b.py
# Topologically Sorted Source Nodes: [add, pre_q], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# add => add
# pre_q => relu
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = 0.0
tmp10 = tmp8 <= tmp9
tl.store(in_out_ptr0 + (x2), tmp8, xmask)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (50, 4), (4, 1))
assert_size_stride(primals_2, (50, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (50, 4), (4, 1))
assert_size_stride(primals_5, (50, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 50), (50, 1))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 50), (1, 4), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0); del buf0 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool)
# Topologically Sorted Source Nodes: [add, pre_q], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_add_relu_threshold_backward_0.run(buf2, primals_2, buf1, primals_5, buf5, 3200, grid=grid(3200), stream=stream0)
del buf1
del primals_2
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_value], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (64, 50), (50, 1), 0), reinterpret_tensor(primals_7, (50, 1), (1, 50), 0), alpha=1, beta=1, out=buf4)
del primals_8
return (reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (64, 50), (50, 1), 0), primals_7, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((50, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((50, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 50), (50, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as func
class CriticNetwork(torch.nn.Module):
def __init__(self, s_space, a_space):
super(CriticNetwork, self).__init__()
self.s_dense = torch.nn.Linear(s_space, 50)
self.a_dense = torch.nn.Linear(a_space, 50)
self.q_dense = torch.nn.Linear(50, 1)
def forward(self, s, a):
phi_s = self.s_dense(s)
phi_a = self.a_dense(a)
pre_q = func.relu(phi_s + phi_a)
q_value = self.q_dense(pre_q)
return q_value
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'s_space': 4, 'a_space': 4}]
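# Minimal usage sketch (assumption: CPU execution is fine here, since the eager
# module has no CUDA dependency):
#   net = CriticNetwork(s_space=4, a_space=4)
#   s, a = get_inputs()
#   q = net(s, a)  # shape (4, 4, 4, 1)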
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = 0.0
tmp10 = tmp8 <= tmp9
tl.store(in_out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr0 + x2, tmp10, xmask)
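# Note: the kernel above consumes the two bias-free GEMM outputs (state and
# action branches), adds both biases, applies ReLU in place, and stores the
# relu(x) <= 0 mask for threshold_backward.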
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (50, 4), (4, 1))
assert_size_stride(primals_2, (50,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (50, 4), (4, 1))
assert_size_stride(primals_5, (50,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 50), (50, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 50), (1, 4), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_add_relu_threshold_backward_0[grid(3200)](buf2,
primals_2, buf1, primals_5, buf5, 3200, XBLOCK=128, num_warps=4,
num_stages=1)
del buf1
del primals_2
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (64, 50),
(50, 1), 0), reinterpret_tensor(primals_7, (50, 1), (1, 50), 0),
alpha=1, beta=1, out=buf4)
del primals_8
    return (reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf2, (64, 50), (50, 1), 0), primals_7, buf5)
class CriticNetworkNew(torch.nn.Module):
def __init__(self, s_space, a_space):
super(CriticNetworkNew, self).__init__()
self.s_dense = torch.nn.Linear(s_space, 50)
self.a_dense = torch.nn.Linear(a_space, 50)
self.q_dense = torch.nn.Linear(50, 1)
def forward(self, input_0, input_1):
primals_1 = self.s_dense.weight
primals_2 = self.s_dense.bias
primals_4 = self.a_dense.weight
primals_5 = self.a_dense.bias
primals_7 = self.q_dense.weight
primals_8 = self.q_dense.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
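# Minimal usage sketch (assumption: a CUDA device is available, since call()
# allocates CUDA buffers):
#   net = CriticNetworkNew(s_space=4, a_space=4).cuda()
#   q = net(torch.rand(4, 4, 4, 4, device='cuda'),
#           torch.rand(4, 4, 4, 4, device='cuda'))  # shape (4, 4, 4, 1)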
| yutiansut/Personae | CriticNetwork | false | 16,778 | [
"MIT"
] | 1,046 | e5e89cbaaf2c4708952d25fdb25e99837aecdb4e | https://github.com/yutiansut/Personae/tree/e5e89cbaaf2c4708952d25fdb25e99837aecdb4e |
GELU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ek/cekqmjotkcsacfdbkc7oinl4vltq6kmmxpvttcglsbg5ihsegnbb.py
# Topologically Sorted Source Nodes: [wrapped_sqrt, pow_1, mul, add, mul_1, tanh, add_1, cdf, mul_3], Original ATen: [aten.sqrt, aten.pow, aten.mul, aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# cdf => mul_2
# mul => mul
# mul_1 => mul_1
# mul_3 => mul_3
# pow_1 => pow_1
# tanh => tanh
# wrapped_sqrt => full_default
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.7978845608028654), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 3), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %mul), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default, %add), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1.0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.5), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %mul_2), kwargs = {})
triton_poi_fused_add_mul_pow_sqrt_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_pow_sqrt_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_sqrt_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 * tmp0
tmp2 = tmp1 * tmp0
tmp3 = 0.044715
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp6 = 0.7978845608028654
tmp7 = tmp6 * tmp5
tmp8 = libdevice.tanh(tmp7)
tmp9 = 1.0
tmp10 = tmp8 + tmp9
tmp11 = 0.5
tmp12 = tmp10 * tmp11
tmp13 = tmp0 * tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [wrapped_sqrt, pow_1, mul, add, mul_1, tanh, add_1, cdf, mul_3], Original ATen: [aten.sqrt, aten.pow, aten.mul, aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_pow_sqrt_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
class GELU(nn.Module):
def forward(self, x):
cdf = 0.5 * (1.0 + torch.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 *
torch.pow(x, 3))))
return x * cdf
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
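# Sanity-check sketch (assumption: PyTorch >= 1.12, where F.gelu supports the
# tanh approximation this module implements):
#   x = torch.rand(4, 4, 4, 4)
#   ref = torch.nn.functional.gelu(x, approximate='tanh')
#   assert torch.allclose(GELU()(x), ref, atol=1e-6)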
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = tmp1 * tmp0
tmp3 = 0.044715
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
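    # sqrt(2 / pi) = 0.7978845608028654, the constant of the tanh GELU approximation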
tmp6 = 0.7978845608028654
tmp7 = tmp6 * tmp5
tmp8 = libdevice.tanh(tmp7)
tmp9 = 1.0
tmp10 = tmp8 + tmp9
tmp11 = 0.5
tmp12 = tmp10 * tmp11
tmp13 = tmp0 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_sqrt_tanh_0[grid(256)](arg0_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GELUNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| yyht/Funnel_Transformer | GELU | false | 16,779 | [
"MIT"
] | 193 | 4b35a794d5e122a8054471863a52d4eac1c39dcd | https://github.com/yyht/Funnel_Transformer/tree/4b35a794d5e122a8054471863a52d4eac1c39dcd |
DynamicPreHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/kn/cknyjwkwufnzzf4ya3scui55ownkmt5cdh3hggzwsfe3ch5fshzm.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5n/c5ntvxirzrfxhvpqxhnwygvq7ljms62jucexb6q4gzohhvbidcan.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1638400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 100
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tc/ctc2mg6575smgmi5ym7g47qyuwaxds2hntyzthl2dmq46vajn25m.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# x_1 => var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
triton_per_fused_native_group_norm_2 = async_compile.triton('triton_per_fused_native_group_norm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16384, 128],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 5, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_2(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 12800
rnumel = 128
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r3 = rindex
x0 = xindex % 50
x1 = (xindex // 50) % 64
x2 = (xindex // 3200)
x4 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (100*((r3 + (128*x1)) % 4096)) + (409600*x2) + ((r3 + (128*x1)) // 4096)), xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 128, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tl.store(out_ptr0 + (x4), tmp10, xmask)
tl.store(out_ptr1 + (x4), tmp16, xmask)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/r7/cr7y4cpmphvbdki47z6sjwhqvpap6fmimynrcbmpoclzdojb5elw.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# x_1 => var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
triton_per_fused_native_group_norm_3 = async_compile.triton('triton_per_fused_native_group_norm_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 200
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 50
x1 = (xindex // 50)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (50*r2) + (3200*x1)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + (50*r2) + (3200*x1)), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (x0 + (50*r2) + (3200*x1)), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp15 = tmp12[:, None]
tl.store(out_ptr0 + (x3), tmp13, xmask)
tl.store(out_ptr1 + (x3), tmp14, xmask)
tl.store(out_ptr2 + (x3), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gd/cgdldbamtse56reif25lhp56oz47dwxhzalv2yrz7d2kd7wnokg3.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# x_1 => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_per_fused_native_group_norm_4 = async_compile.triton('triton_per_fused_native_group_norm_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 2],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 100
rnumel = 2
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (2*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (2*x0)), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (r1 + (2*x0)), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp15 = tmp12[:, None]
tmp16 = 16384.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(out_ptr2 + (x0), tmp20, xmask)
tl.store(out_ptr0 + (x0), tmp13, xmask)
tl.store(out_ptr1 + (x0), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/o5/co55ow27kxqru5gjpedo4tmhg6oyewjwn2aduyikuiqnn3wmpzwf.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.native_group_norm, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => add_1, mul_1
# x_2 => relu
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_native_group_norm_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_native_group_norm_relu_threshold_backward_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512, 4096], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_threshold_backward_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_threshold_backward_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 400
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 100
y1 = (yindex // 100)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (100*x2) + (409600*y1)), ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + ((y3 // 4)), ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((y3 // 4)), ymask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (y0), ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 16384.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1, 1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = 0.0
tmp17 = tmp15 <= tmp16
tl.store(out_ptr0 + (x2 + (4096*y3)), tmp15, ymask)
tl.store(out_ptr1 + (y0 + (100*x2) + (409600*y1)), tmp17, ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (100, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(primals_2, (100, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (100, ), (1, ))
assert_size_stride(primals_5, (100, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_3, buf0, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 100, 64, 64), (409600, 1, 6400, 100))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_2, 1638400, grid=grid(1638400), stream=stream0)
del primals_2
buf3 = empty_strided_cuda((4, 25, 1, 1, 2, 64), (3200, 2, 12800, 12800, 1, 50), torch.float32)
buf4 = empty_strided_cuda((4, 25, 1, 1, 2, 64), (3200, 2, 12800, 12800, 1, 50), torch.float32)
buf5 = empty_strided_cuda((4, 25, 1, 1, 2, 64), (3200, 2, 12800, 12800, 1, 50), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_2.run(buf2, buf3, buf4, buf5, 12800, 128, grid=grid(12800), stream=stream0)
buf6 = empty_strided_cuda((4, 25, 1, 1, 2), (50, 2, 200, 200, 1), torch.float32)
buf7 = empty_strided_cuda((4, 25, 1, 1, 2), (50, 2, 200, 200, 1), torch.float32)
buf8 = empty_strided_cuda((4, 25, 1, 1, 2), (50, 2, 200, 200, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_3.run(buf3, buf4, buf5, buf6, buf7, buf8, 200, 64, grid=grid(200), stream=stream0)
del buf3
del buf4
del buf5
buf9 = empty_strided_cuda((4, 25, 1, 1), (25, 1, 100, 100), torch.float32)
buf10 = empty_strided_cuda((4, 25, 1, 1), (25, 1, 100, 100), torch.float32)
buf12 = empty_strided_cuda((4, 25, 1, 1), (25, 1, 100, 100), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_4.run(buf6, buf7, buf8, buf9, buf10, buf12, 100, 2, grid=grid(100), stream=stream0)
del buf6
del buf7
del buf8
buf13 = empty_strided_cuda((4, 100, 64, 64), (409600, 4096, 64, 1), torch.float32)
buf14 = empty_strided_cuda((4, 100, 64, 64), (409600, 1, 6400, 100), torch.bool)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.native_group_norm, aten.relu, aten.threshold_backward]
triton_poi_fused_native_group_norm_relu_threshold_backward_5.run(buf2, buf9, buf10, primals_4, primals_5, buf13, buf14, 400, 4096, grid=grid(400, 4096), stream=stream0)
del buf10
del primals_5
return (buf13, primals_1, buf0, primals_4, buf2, reinterpret_tensor(buf9, (4, 25), (25, 1), 0), reinterpret_tensor(buf12, (4, 25), (25, 1), 0), buf14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((100, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DynamicPreHead(nn.Module):
def __init__(self, in_dim=3, embed_dim=100, kernel_size=1):
super(DynamicPreHead, self).__init__()
self.conv = nn.Conv2d(in_dim, embed_dim, kernel_size=kernel_size,
stride=1, padding=int((kernel_size - 1) / 2))
self.bn = nn.GroupNorm(int(embed_dim / 4), embed_dim)
self.relu = nn.ReLU(True)
nn.init.kaiming_normal_(self.conv.weight, mode='fan_out',
nonlinearity='relu')
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
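# Minimal usage sketch (assumption: CPU execution of the eager module):
#   head = DynamicPreHead()
#   y = head(torch.rand(4, 3, 64, 64))  # shape (4, 100, 64, 64)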
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
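# Note: the kernel above permutes the NCHW input (strides 12288, 4096, 64, 1)
# into a channels-last layout (strides 12288, 1, 192, 3) for the convolution.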
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 100
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
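# Note: the convolution itself runs without a bias (bias=None in the
# extern_kernels.convolution call below); the kernel above adds the 100-channel
# bias to the conv output in place.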
@triton.jit
def triton_per_fused_native_group_norm_2(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 12800
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r3 = rindex
x0 = xindex % 50
x1 = xindex // 50 % 64
x2 = xindex // 3200
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 100 * ((r3 + 128 * x1) % 4096) +
409600 * x2 + (r3 + 128 * x1) // 4096), xmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 128, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tl.store(out_ptr0 + x4, tmp10, xmask)
tl.store(out_ptr1 + x4, tmp16, xmask)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_per_fused_native_group_norm_3(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 200
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 50
x1 = xindex // 50
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 50 * r2 + 3200 * x1), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + 50 * r2 + 3200 * x1), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (x0 + 50 * r2 + 3200 * x1), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp15 = tmp12[:, None]
tl.store(out_ptr0 + x3, tmp13, xmask)
tl.store(out_ptr1 + x3, tmp14, xmask)
tl.store(out_ptr2 + x3, tmp15, xmask)
@triton.jit
def triton_per_fused_native_group_norm_4(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 100
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 2 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 2 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (r1 + 2 * x0), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp12[:, None]
tmp16 = 16384.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(out_ptr2 + x0, tmp20, xmask)
tl.store(out_ptr0 + x0, tmp13, xmask)
tl.store(out_ptr1 + x0, tmp14, xmask)
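# Note: kernels 2-4 form a hierarchical reduction for the GroupNorm statistics:
# kernel 2 computes mean/M2 over chunks of 128 elements, and kernels 3 and 4
# merge 64 and then 2 partials with Welford combining; the final stage turns
# the accumulated M2 into rsqrt(var / 16384 + 1e-05), where 16384 is the
# element count of one (batch, group) slice (4 channels * 64 * 64).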
@triton.jit
def triton_poi_fused_native_group_norm_relu_threshold_backward_5(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 400
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 100
y1 = yindex // 100
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 100 * x2 + 409600 * y1), ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y3 // 4, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y3 // 4, ymask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + y0, ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 16384.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1, 1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = 0.0
tmp17 = tmp15 <= tmp16
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp15, ymask)
tl.store(out_ptr1 + (y0 + 100 * x2 + 409600 * y1), tmp17, ymask)
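# Note: the kernel above fuses the per-group normalization with the affine
# gamma/beta and the ReLU; it writes the activation in contiguous NCHW layout
# (out_ptr0) and the relu(x) <= 0 mask for threshold_backward in channels-last
# layout (out_ptr1).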
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (100, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(primals_2, (100,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (100,), (1,))
assert_size_stride(primals_5, (100,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3),
            torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(12, 4096)](primals_3, buf0, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 100, 64, 64), (409600, 1, 6400, 100))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(1638400)](buf2, primals_2,
1638400, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((4, 25, 1, 1, 2, 64), (3200, 2, 12800,
12800, 1, 50), torch.float32)
buf4 = empty_strided_cuda((4, 25, 1, 1, 2, 64), (3200, 2, 12800,
12800, 1, 50), torch.float32)
buf5 = empty_strided_cuda((4, 25, 1, 1, 2, 64), (3200, 2, 12800,
12800, 1, 50), torch.float32)
triton_per_fused_native_group_norm_2[grid(12800)](buf2, buf3, buf4,
buf5, 12800, 128, XBLOCK=8, num_warps=8, num_stages=1)
buf6 = empty_strided_cuda((4, 25, 1, 1, 2), (50, 2, 200, 200, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 25, 1, 1, 2), (50, 2, 200, 200, 1),
torch.float32)
buf8 = empty_strided_cuda((4, 25, 1, 1, 2), (50, 2, 200, 200, 1),
torch.float32)
triton_per_fused_native_group_norm_3[grid(200)](buf3, buf4, buf5,
buf6, buf7, buf8, 200, 64, XBLOCK=8, num_warps=4, num_stages=1)
del buf3
del buf4
del buf5
        buf9 = empty_strided_cuda((4, 25, 1, 1), (25, 1, 100, 100),
            torch.float32)
        buf10 = empty_strided_cuda((4, 25, 1, 1), (25, 1, 100, 100),
            torch.float32)
        buf12 = empty_strided_cuda((4, 25, 1, 1), (25, 1, 100, 100),
            torch.float32)
triton_per_fused_native_group_norm_4[grid(100)](buf6, buf7, buf8,
buf9, buf10, buf12, 100, 2, XBLOCK=128, num_warps=2, num_stages=1)
del buf6
del buf7
del buf8
buf13 = empty_strided_cuda((4, 100, 64, 64), (409600, 4096, 64, 1),
torch.float32)
buf14 = empty_strided_cuda((4, 100, 64, 64), (409600, 1, 6400, 100),
torch.bool)
triton_poi_fused_native_group_norm_relu_threshold_backward_5[grid(
400, 4096)](buf2, buf9, buf10, primals_4, primals_5, buf13,
buf14, 400, 4096, XBLOCK=64, YBLOCK=64, num_warps=8, num_stages=1)
del buf10
del primals_5
    return (buf13, primals_1, buf0, primals_4, buf2,
        reinterpret_tensor(buf9, (4, 25), (25, 1), 0),
        reinterpret_tensor(buf12, (4, 25), (25, 1), 0), buf14)
class DynamicPreHeadNew(nn.Module):
def __init__(self, in_dim=3, embed_dim=100, kernel_size=1):
super(DynamicPreHeadNew, self).__init__()
self.conv = nn.Conv2d(in_dim, embed_dim, kernel_size=kernel_size,
stride=1, padding=int((kernel_size - 1) / 2))
self.bn = nn.GroupNorm(int(embed_dim / 4), embed_dim)
self.relu = nn.ReLU(True)
nn.init.kaiming_normal_(self.conv.weight, mode='fan_out',
nonlinearity='relu')
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.bn.weight
primals_5 = self.bn.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
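# Minimal usage sketch (assumption: a CUDA device is available, since call()
# allocates CUDA buffers):
#   head = DynamicPreHeadNew().cuda()
#   y = head(torch.rand(4, 3, 64, 64, device='cuda'))  # shape (4, 100, 64, 64)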
| yoxu515/CFBI | DynamicPreHead | false | 16,780 | [
"BSD-3-Clause"
] | 312 | 0bab1e3c9fc3e3ba0629f716d60221e8f8d9d586 | https://github.com/yoxu515/CFBI/tree/0bab1e3c9fc3e3ba0629f716d60221e8f8d9d586 |
Dense | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/v6/cv6odvhmmcyvquog4eo62pdliew53orxzwe2wfzampr64jy3ppa7.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# output_1 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_3), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 64, 4), (256, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 64, 4), (256, 4, 1), 0), reinterpret_tensor(primals_1, (1, 4, 4), (16, 4, 1), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf1, reinterpret_tensor(primals_2, (1, 4, 64), (256, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
def get_einsum_string(ndims, einsum_symbols=None):
if einsum_symbols is None:
einsum_symbols = ['u', 'v', 'w', 'x', 'y', 'z']
assert ndims <= len(einsum_symbols)
einsum_prefix = ''
for i in range(ndims):
einsum_prefix += einsum_symbols[i]
return einsum_prefix
def maybe_convert_to_list(x):
    if isinstance(x, (int, float)):
        return [x]
    elif isinstance(x, (list, tuple)):
        return list(x)
    raise TypeError('shape must be an int, float, list or tuple, got %r' %
        type(x))
class Dense(nn.Module):
"""Dense layer."""
def __init__(self, inp_shape, out_shape, bias=True, reverse_order=False):
super(Dense, self).__init__()
self.inp_shape = maybe_convert_to_list(inp_shape)
self.out_shape = maybe_convert_to_list(out_shape)
self.reverse_order = reverse_order
if self.reverse_order:
self.einsum_str = '...{0},{1}{0}->...{1}'.format(get_einsum_string
(len(self.inp_shape), ['a', 'b', 'c', 'd']),
get_einsum_string(len(self.out_shape), ['e', 'f', 'g', 'h']))
weight_shape = self.out_shape + self.inp_shape
else:
self.einsum_str = '...{0},{0}{1}->...{1}'.format(get_einsum_string
(len(self.inp_shape), ['a', 'b', 'c', 'd']),
get_einsum_string(len(self.out_shape), ['e', 'f', 'g', 'h']))
weight_shape = self.inp_shape + self.out_shape
self.weight = nn.Parameter(torch.zeros(weight_shape))
if bias:
self.bias = nn.Parameter(torch.zeros(self.out_shape))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
fan_in = np.prod(self.inp_shape)
fan_out = np.prod(self.out_shape)
std = np.sqrt(1.0 / float(fan_in + fan_out))
nn.init.normal_(self.weight, std=std)
if self.bias is not None:
nn.init.constant_(self.bias, 0.0)
def forward(self, inputs):
output = torch.einsum(self.einsum_str, inputs, self.weight)
if self.bias is not None:
output = output + self.bias
return output
def extra_repr(self):
return 'inp_shape={}, out_shape={}, bias={}'.format(self.inp_shape,
self.out_shape, self.bias is not None)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inp_shape': 4, 'out_shape': 4}]
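# Hedged sketch (added for illustration, not part of the original module):
# with scalar inp_shape/out_shape the einsum string degenerates to
# '...a,ae->...e', so Dense(4, 4) is just a matmul against the (4, 4) weight
# followed by a bias add.
def _dense_equivalence_sketch():
    layer = Dense(4, 4)
    x = torch.rand(4, 4, 4, 4)
    ref = torch.matmul(x, layer.weight) + layer.bias
    assert torch.allclose(layer(x), ref, atol=1e-06)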
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
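# triton_poi_fused_add_0 below is the fused bias add of the Dense layer: it
# broadcasts the length-4 bias (in_ptr0, indexed by xindex % 4) in place over
# the 256 elements of the bmm output held in in_out_ptr0.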
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 64, 4), (256, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 64, 4), (256,
4, 1), 0), reinterpret_tensor(primals_1, (1, 4, 4), (16, 4, 1),
0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf1, primals_3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_3
return buf1, reinterpret_tensor(primals_2, (1, 4, 64), (256, 1, 4), 0)
def get_einsum_string(ndims, einsum_symbols=None):
if einsum_symbols is None:
einsum_symbols = ['u', 'v', 'w', 'x', 'y', 'z']
assert ndims <= len(einsum_symbols)
einsum_prefix = ''
for i in range(ndims):
einsum_prefix += einsum_symbols[i]
return einsum_prefix
def maybe_convert_to_list(x):
    if isinstance(x, (int, float)):
        return [x]
    elif isinstance(x, (list, tuple)):
        return list(x)
    raise TypeError('shape must be an int, float, list or tuple, got %r' %
        type(x))
class DenseNew(nn.Module):
"""Dense layer."""
def __init__(self, inp_shape, out_shape, bias=True, reverse_order=False):
super(DenseNew, self).__init__()
self.inp_shape = maybe_convert_to_list(inp_shape)
self.out_shape = maybe_convert_to_list(out_shape)
self.reverse_order = reverse_order
if self.reverse_order:
self.einsum_str = '...{0},{1}{0}->...{1}'.format(get_einsum_string
(len(self.inp_shape), ['a', 'b', 'c', 'd']),
get_einsum_string(len(self.out_shape), ['e', 'f', 'g', 'h']))
weight_shape = self.out_shape + self.inp_shape
else:
self.einsum_str = '...{0},{0}{1}->...{1}'.format(get_einsum_string
(len(self.inp_shape), ['a', 'b', 'c', 'd']),
get_einsum_string(len(self.out_shape), ['e', 'f', 'g', 'h']))
weight_shape = self.inp_shape + self.out_shape
self.weight = nn.Parameter(torch.zeros(weight_shape))
if bias:
self.bias = nn.Parameter(torch.zeros(self.out_shape))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
fan_in = np.prod(self.inp_shape)
fan_out = np.prod(self.out_shape)
std = np.sqrt(1.0 / float(fan_in + fan_out))
nn.init.normal_(self.weight, std=std)
if self.bias is not None:
nn.init.constant_(self.bias, 0.0)
def extra_repr(self):
return 'inp_shape={}, out_shape={}, bias={}'.format(self.inp_shape,
self.out_shape, self.bias is not None)
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
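# Hedged check (added for illustration; assumes a CUDA device): the compiled
# call() folds the einsum into one bmm over a (1, 64, 4) view of the input
# plus the fused bias-add kernel, so DenseNew should agree numerically with
# the eager einsum path.
def _compiled_vs_eager_sketch():
    layer = DenseNew(4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = torch.einsum(layer.einsum_str, x, layer.weight) + layer.bias
    assert torch.allclose(layer(x), ref, atol=1e-05)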
| yyht/Funnel_Transformer | Dense | false | 16,781 | [
"MIT"
] | 193 | 4b35a794d5e122a8054471863a52d4eac1c39dcd | https://github.com/yyht/Funnel_Transformer/tree/4b35a794d5e122a8054471863a52d4eac1c39dcd |
InteractionLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/o7/co7hznwocczd7hslneqfmgyzcw5e3oi23ln24wfmq6yj6lmq6b43.py
# Topologically Sorted Source Nodes: [det_weight], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# det_weight => amax, clone, exp, sub
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute_5,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qp/cqp3fdf3ecmhmbwcc3rtgik3semb3eaopuhjgnixrrilbp6nufln.py
# Topologically Sorted Source Nodes: [det_weight], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# det_weight => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (y0 + (16*y1)), ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + y0 + (16*y1)), ymask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + y0 + (16*y1)), ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + y0 + (16*y1)), ymask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + (4*y3)), tmp8, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ef/cef3w3a6i5ps2bwl3fxt5xklbk5ykvelijsohc5pkm2gqsxp2prg.py
# Topologically Sorted Source Nodes: [rel_out, rel_out_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# rel_out => add
# rel_out_1 => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%bmm_1, %permute_7), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_2 = async_compile.triton('triton_poi_fused_add_native_layer_norm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + ((4*x1) + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x1) + (16*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x1) + (16*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x1) + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ln/clntvr3o2zfnjpakitdui5owy4gze4rd5v67pjrfsnyofta475wa.py
# Topologically Sorted Source Nodes: [rel_out, rel_out_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# rel_out => add
# rel_out_1 => add_1, add_2, mul, mul_1, rsqrt, sub_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%bmm_1, %permute_7), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_9), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_10), kwargs = {})
triton_poi_fused_add_native_layer_norm_3 = async_compile.triton('triton_poi_fused_add_native_layer_norm_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x4 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (4*x2) + (16*x1)), xmask)
tmp3 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x3), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [det_attn_in], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [rel_attn_in], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [det_value], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_7
del primals_8
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (4, 16, 1), 0), reinterpret_tensor(buf1, (4, 4, 4), (4, 1, 16), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
# Topologically Sorted Source Nodes: [det_weight], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [det_weight], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 16, 4, grid=grid(16, 4), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [rel_add], Original ATen: [aten.bmm]
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 4), (4, 16, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [rel_out, rel_out_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_2.run(buf6, primals_6, buf7, buf8, 16, grid=grid(16), stream=stream0)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [rel_out, rel_out_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_3.run(buf6, primals_6, buf7, buf8, primals_9, primals_10, buf9, 64, grid=grid(64), stream=stream0)
del buf7
del buf8
del primals_10
return (reinterpret_tensor(buf9, (4, 4, 4), (4, 16, 1), 0), primals_6, primals_9, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf5, buf6, reinterpret_tensor(buf2, (4, 4, 4), (4, 1, 16), 0), reinterpret_tensor(buf0, (4, 4, 4), (4, 1, 16), 0), reinterpret_tensor(buf1, (4, 4, 4), (4, 16, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
import torch.nn.functional as F
class InteractionLayer(nn.Module):
def __init__(self, d_model, d_feature, dropout=0.1):
super().__init__()
self.d_feature = d_feature
self.det_tfm = nn.Linear(d_model, d_feature)
self.rel_tfm = nn.Linear(d_model, d_feature)
self.det_value_tfm = nn.Linear(d_model, d_feature)
self.rel_norm = nn.LayerNorm(d_model)
if dropout is not None:
self.dropout = dropout
self.det_dropout = nn.Dropout(dropout)
self.rel_add_dropout = nn.Dropout(dropout)
else:
self.dropout = None
def forward(self, det_in, rel_in):
det_attn_in = self.det_tfm(det_in)
rel_attn_in = self.rel_tfm(rel_in)
det_value = self.det_value_tfm(det_in)
scores = torch.matmul(det_attn_in.transpose(0, 1), rel_attn_in.
permute(1, 2, 0)) / math.sqrt(self.d_feature)
det_weight = F.softmax(scores.transpose(1, 2), dim=-1)
if self.dropout is not None:
det_weight = self.det_dropout(det_weight)
rel_add = torch.matmul(det_weight, det_value.transpose(0, 1))
rel_out = self.rel_add_dropout(rel_add) + rel_in.transpose(0, 1)
rel_out = self.rel_norm(rel_out)
return det_in, rel_out.transpose(0, 1)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_feature': 4}]
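# Hedged reference (added for illustration): in eval mode (dropout disabled)
# the forward pass above is a single-head scaled dot-product attention in
# which detection queries score relation keys, followed by a residual
# LayerNorm on the relation stream.
def _interaction_reference_sketch(layer, det_in, rel_in):
    q = layer.det_tfm(det_in).transpose(0, 1)
    k = layer.rel_tfm(rel_in).permute(1, 2, 0)
    v = layer.det_value_tfm(det_in).transpose(0, 1)
    w = F.softmax(torch.matmul(q, k).transpose(1, 2) / math.sqrt(layer.d_feature), dim=-1)
    rel_out = layer.rel_norm(torch.matmul(w, v) + rel_in.transpose(0, 1))
    return det_in, rel_out.transpose(0, 1)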
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
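# triton_poi_fused__softmax_0 below computes the softmax numerators for the
# transposed attention scores: each logit is scaled by 0.5 (1/sqrt(d_feature)
# for d_feature=4), the row max taken along the stride-4 axis is subtracted
# for numerical stability, and the result is exponentiated.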
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + x3, tmp15, xmask)
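# triton_poi_fused__softmax_1 below completes the softmax: each numerator is
# divided by the sum of the four exponentials in its row, and the result is
# written out in the contiguous layout consumed by the following bmm.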
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (y0 + 16 * y1), ymask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + y0 + 16 * y1), ymask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + y0 + 16 * y1), ymask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + y0 + 16 * y1), ymask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + 4 * y3), tmp8, xmask & ymask)
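# triton_poi_fused_add_native_layer_norm_2 below forms the residual sum
# rel_add + rel_in.transpose(0, 1) and reduces each length-4 row to its mean
# (out_ptr0) and biased variance (out_ptr1), the statistics LayerNorm needs.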
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 * x1 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1 + 16 * x0), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x1 + 16 * x0), xmask, eviction_policy
='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x1 + 16 * x0), xmask,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp28, xmask)
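# triton_poi_fused_add_native_layer_norm_3 below applies the LayerNorm proper:
# it re-forms the residual sum, normalizes with rsqrt(variance + 1e-05), and
# scales and shifts by the elementwise weight (in_ptr4) and bias (in_ptr5).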
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x4 = xindex // 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (16,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf2)
del primals_7
del primals_8
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (4, 16, 1),
0), reinterpret_tensor(buf1, (4, 4, 4), (4, 1, 16), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = buf3
del buf3
triton_poi_fused__softmax_1[grid(16, 4)](buf4, buf5, 16, 4, XBLOCK=
4, YBLOCK=16, num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0)
del buf4
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 4), (4, 16,
1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_2[grid(16)](buf6, primals_6,
buf7, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_3[grid(64)](buf6, primals_6,
buf7, buf8, primals_9, primals_10, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf7
del buf8
del primals_10
return reinterpret_tensor(buf9, (4, 4, 4), (4, 16, 1), 0
), primals_6, primals_9, reinterpret_tensor(primals_3, (16, 4), (4,
1), 0), buf5, buf6, reinterpret_tensor(buf2, (4, 4, 4), (4, 1, 16), 0
), reinterpret_tensor(buf0, (4, 4, 4), (4, 1, 16), 0
), reinterpret_tensor(buf1, (4, 4, 4), (4, 16, 1), 0)
class InteractionLayerNew(nn.Module):
def __init__(self, d_model, d_feature, dropout=0.1):
super().__init__()
self.d_feature = d_feature
self.det_tfm = nn.Linear(d_model, d_feature)
self.rel_tfm = nn.Linear(d_model, d_feature)
self.det_value_tfm = nn.Linear(d_model, d_feature)
self.rel_norm = nn.LayerNorm(d_model)
if dropout is not None:
self.dropout = dropout
self.det_dropout = nn.Dropout(dropout)
self.rel_add_dropout = nn.Dropout(dropout)
else:
self.dropout = None
def forward(self, input_0, input_1):
primals_1 = self.det_tfm.weight
primals_2 = self.det_tfm.bias
primals_4 = self.rel_tfm.weight
primals_5 = self.rel_tfm.bias
primals_7 = self.det_value_tfm.weight
primals_8 = self.det_value_tfm.bias
primals_9 = self.rel_norm.weight
primals_10 = self.rel_norm.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0], output[1]
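# Hedged smoke test (added for illustration; assumes CUDA): output[0] of
# call() carries the normalized relation stream, while the second returned
# value is simply the relation input tensor passed straight through.
def _interaction_smoke_sketch():
    layer = InteractionLayerNew(4, 4).cuda().eval()
    det = torch.rand(4, 4, 4, device='cuda')
    rel = torch.rand(4, 4, 4, device='cuda')
    rel_out, rel_echo = layer(det, rel)
    assert rel_out.shape == (4, 4, 4) and rel_echo is rel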
| yoyomimi/AS-Net | InteractionLayer | false | 16,782 | [
"MIT"
] | 49 | 85ce753707c6d1838c3983111ccbba4b1861f438 | https://github.com/yoyomimi/AS-Net/tree/85ce753707c6d1838c3983111ccbba4b1861f438 |
Biaffine | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/bc/cbcett6ey62xkijoadrmiqwnmvdqa242vrdqwxiw4pvecwqjoged.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %full_default], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/cf/ccfq5awbbg5h64kuwgad4iylej2dcjqm5wgnshn6bufymm3pj5ag.py
# Topologically Sorted Source Nodes: [s, s_1], Original ATen: [aten.div, aten.squeeze]
# Source node to ATen node mapping:
# s => div
# s_1 => squeeze
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_6, 1), kwargs = {})
# %squeeze : [num_users=1] = call_function[target=torch.ops.aten.squeeze.dim](args = (%div, 1), kwargs = {})
triton_poi_fused_div_squeeze_1 = async_compile.triton('triton_poi_fused_div_squeeze_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_squeeze_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_squeeze_1(in_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr1 + (y0 + (4*x2) + (16*y1)), tmp2, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 5, 5), (25, 5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 80, grid=grid(80), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 5), (80, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (1, 16, 5), (0, 5, 1), 0), primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(primals_2, buf2, 80, grid=grid(80), stream=stream0)
del primals_2
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.bmm]
extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (4, 5, 4), (20, 1, 5), 0), out=buf3)
del buf1
buf5 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
# Topologically Sorted Source Nodes: [s, s_1], Original ATen: [aten.div, aten.squeeze]
triton_poi_fused_div_squeeze_1.run(buf3, buf5, 16, 4, grid=grid(16, 4), stream=stream0)
del buf3
return (buf5, reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0), reinterpret_tensor(buf0, (1, 5, 16), (80, 1, 5), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 5, 5), (25, 5, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Biaffine(nn.Module):
"""
Biaffine layer for first-order scoring :cite:`dozat-etal-2017-biaffine`.
    This module has a tensor of weights :math:`W` and, if enabled, bias terms.
The score :math:`s(x, y)` of the vector pair :math:`(x, y)` is computed as :math:`x^T W y / d^s`,
where `d` and `s` are vector dimension and scaling factor respectively.
:math:`x` and :math:`y` can be concatenated with bias terms.
Args:
n_in (int):
The size of the input feature.
n_out (int):
The number of output channels.
scale (float):
Factor to scale the scores. Default: 0.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``True``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``True``.
"""
def __init__(self, n_in, n_out=1, scale=0, bias_x=True, bias_y=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.scale = scale
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x, n_in +
bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}'
if self.n_out > 1:
s += f', n_out={self.n_out}'
if self.scale != 0:
s += f', scale={self.scale}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, x, y):
"""
Args:
x (torch.Tensor): ``[batch_size, seq_len, n_in]``.
y (torch.Tensor): ``[batch_size, seq_len, n_in]``.
Returns:
~torch.Tensor:
A scoring tensor of shape ``[batch_size, n_out, seq_len, seq_len]``.
If ``n_out=1``, the dimension for ``n_out`` will be squeezed automatically.
"""
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
s = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y
) / self.n_in ** self.scale
s = s.squeeze(1)
return s
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4}]
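# Hedged worked example (added for illustration): with n_out=1, scale=0 and
# both bias flags set, the score reduces to an affine form in each argument,
# s[b, i, j] = [x_bi; 1]^T W[0] [y_bj; 1], spelled out below.
def _biaffine_reference_sketch(layer, x, y):
    x1 = torch.cat((x, torch.ones_like(x[..., :1])), -1)
    y1 = torch.cat((y, torch.ones_like(y[..., :1])), -1)
    return torch.einsum('bxi,ij,byj->bxy', x1, layer.weight[0], y1)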
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
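# triton_poi_fused_cat_0 below realizes torch.cat((t, ones_like(t[..., :1])),
# -1): the first four output columns copy the (4, 4, 4) input and the fifth
# column is filled with the constant 1.0 bias term.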
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = x0
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + x2, tmp12, xmask)
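# triton_poi_fused_div_squeeze_1 below applies the final scaling. With
# scale=0 the divisor n_in ** scale equals 1, so the kernel multiplies by 1.0
# and merely rewrites the scores into the layout returned after squeeze(1).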
@triton.jit
def triton_poi_fused_div_squeeze_1(in_ptr0, out_ptr1, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr1 + (y0 + 4 * x2 + 16 * y1), tmp2, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 5, 5), (25, 5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(80)](primals_1, buf0, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((1, 16, 5), (80, 5, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (1, 16, 5), (0, 5, 1),
0), primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
triton_poi_fused_cat_0[grid(80)](primals_2, buf2, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (4, 5, 4), (20, 1,
5), 0), out=buf3)
del buf1
buf5 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
triton_poi_fused_div_squeeze_1[grid(16, 4)](buf3, buf5, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del buf3
return buf5, reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0
), reinterpret_tensor(buf0, (1, 5, 16), (80, 1, 5), 0)
class BiaffineNew(nn.Module):
"""
Biaffine layer for first-order scoring :cite:`dozat-etal-2017-biaffine`.
This function has a tensor of weights :math:`W` and bias terms if needed.
The score :math:`s(x, y)` of the vector pair :math:`(x, y)` is computed as :math:`x^T W y / d^s`,
where `d` and `s` are vector dimension and scaling factor respectively.
:math:`x` and :math:`y` can be concatenated with bias terms.
Args:
n_in (int):
The size of the input feature.
n_out (int):
The number of output channels.
scale (float):
Factor to scale the scores. Default: 0.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``True``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``True``.
"""
def __init__(self, n_in, n_out=1, scale=0, bias_x=True, bias_y=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.scale = scale
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x, n_in +
bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}'
if self.n_out > 1:
s += f', n_out={self.n_out}'
if self.scale != 0:
s += f', scale={self.scale}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
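# Hedged shape check (added for illustration; assumes CUDA): the compiled
# path expresses the einsum as two bmm calls over ones-padded operands, so a
# pair of (4, 4, 4) inputs yields a (4, 4, 4) score tensor with the n_out
# dimension already squeezed away.
def _biaffine_compiled_sketch():
    layer = BiaffineNew(4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, device='cuda')
    assert layer(x, y).shape == (4, 4, 4)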
| yzhangcs/parser | Biaffine | false | 16,783 | [
"MIT"
] | 439 | 3abebde1c9fe0bf2e99adce845aaf2a04b194f8a | https://github.com/yzhangcs/parser/tree/3abebde1c9fe0bf2e99adce845aaf2a04b194f8a |
SAN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/qw/cqw7yoyglmtjad3kirznl5odetqfs3k6pjtnfdbzklyhsdvuvgft.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hz/chzi3aam26mikdhljz5x7jlqazm7kpktzeptsf36thgfhsg7ub6a.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/em/cem6qbxwbiqnjqybzk5arf2obt5uggy4qs7otwwpovvnrhvdc6h4.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rh/crhjfwyl6xoj5ylcsbbh6lp2vlegits2zkdej3b3wb2q4ddfnejv.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7m/c7my77j7miwq7j5yz26lhwtp4fyb6qiw2vuvksvbnxxhdrtuljuq.py
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# src => add
# src_1 => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [1]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/uy/cuyacfovgswdpyhlq2s2chxvljavfbdvz7wnuo2oaa6t6ewmxjgf.py
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# src => add
# src_1 => add_1, add_2, mul_1, mul_2, rsqrt, sub_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_7), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_6), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_7), kwargs = {})
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 4), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 8), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf3, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
del buf5
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf7, buf8, 4, 4, grid=grid(4, 4), stream=stream0)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9)
del primals_5
buf10 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf11 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_4.run(primals_1, buf9, buf10, buf11, 4, grid=grid(4), stream=stream0)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(primals_1, buf9, buf10, buf11, primals_6, primals_7, buf12, 16, grid=grid(16), stream=stream0)
del buf10
del buf11
del primals_7
return (buf12, primals_1, primals_6, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), buf9, primals_4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SAN(nn.Module):
def __init__(self, d_model, nhead, dropout=0.1):
super(SAN, self).__init__()
self.d_model = d_model
self.nhead = nhead
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.dropout = nn.Dropout(p=dropout)
self.norm = nn.LayerNorm(d_model)
def forward(self, src, src_mask=None, src_key_padding_mask=None):
"""
        :param src: the input sequence to attend over
        :param src_mask: optional attention mask passed to ``nn.MultiheadAttention``
        :param src_key_padding_mask: optional mask marking padded key positions
        :return: ``norm(src + dropout(self_attn(src, src, src)))``
"""
src2, _ = self.self_attn(src, src, src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)
src = src + self.dropout(src2)
src = self.norm(src)
return src
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'nhead': 4}]
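if __name__ == '__main__':
    # Illustrative smoke test (not part of the original module). It reuses the
    # get_inputs()/get_init_inputs() helpers above: an unbatched (seq_len=4,
    # d_model=4) input keeps its shape through attention, dropout and LayerNorm.
    model = SAN(d_model=4, nhead=4)
    out = model(*get_inputs())
    assert out.shape == (4, 4)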
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
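# Fused bias-add + scale for Q: adds the query slice of in_proj_bias, then
# multiplies by the attention scale (1/sqrt(head_dim) = 1.0, since head_dim=1
# for d_model=4 with nhead=4).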
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
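# Softmax over the last dim (size 4), step 1: subtract the row-wise max and
# exponentiate for numerical stability.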
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
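# Softmax step 2: normalize each exponentiated row by its sum.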
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
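# Transposes the (nhead, seq) attention output back to (seq, embed) layout
# (head_dim=1 here) before the output projection.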
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
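# Fused residual + LayerNorm statistics: per-row mean and biased variance of
# src + attn_out over the 4 features.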
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
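# LayerNorm finish: recompute src + attn_out, normalize with rsqrt(var + 1e-5),
# then apply the elementwise affine weight and bias.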
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 4),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 8),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf3, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1,
4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf5
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4,
1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0)
del buf7
extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (4, 4), (4,
1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf9)
del primals_5
buf10 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf11 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_add_native_layer_norm_4[grid(4)](primals_1, buf9,
buf10, buf11, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_1, buf9,
buf10, buf11, primals_6, primals_7, buf12, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf10
del buf11
del primals_7
return buf12, primals_1, primals_6, buf6, reinterpret_tensor(buf8, (4,
4), (4, 1), 0), buf9, primals_4, reinterpret_tensor(buf2, (4, 1, 4),
(1, 1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0
), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0)
class SANNew(nn.Module):
def __init__(self, d_model, nhead, dropout=0.1):
super(SANNew, self).__init__()
self.d_model = d_model
self.nhead = nhead
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.dropout = nn.Dropout(p=dropout)
self.norm = nn.LayerNorm(d_model)
def forward(self, input_0):
primals_2 = self.self_attn.in_proj_weight
primals_3 = self.self_attn.in_proj_bias
        # call() uses primals_4 as the out-projection weight and primals_1 as
        # the residual input, so map them accordingly.
        primals_4 = self.self_attn.out_proj.weight
primals_5 = self.self_attn.out_proj.bias
primals_6 = self.norm.weight
primals_7 = self.norm.bias
        primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| yuriy-os/russian-reviews-bert-e2e-absa | SAN | false | 16,784 | [
"Apache-2.0"
] | 293 | 369a6179353e3bf28643e8e9347b624078e38bf4 | https://github.com/yuriy-os/russian-reviews-bert-e2e-absa/tree/369a6179353e3bf28643e8e9347b624078e38bf4 |
Triaffine | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ao/caoovxtqrx42gvkmjirowqmmbh6kppvfh5ebrzzv4kzkgwm2umii.py
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# w => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask)
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ah/cahcbdgzcypclgrmenrcgftl53kemvcm53v6yoxzwdqjyblrincb.py
# Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# einsum_1 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_9,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ue/cuezbfqwohbeaz3pb7qhlpx5cqgl5yrcj3nds7mhmzqk7fidklrx.py
# Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# einsum_1 => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_12,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 4
x3 = (xindex // 4)
y0 = yindex % 4
y1 = (yindex // 4)
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x3) + (16*x2) + (64*y1)), xmask & ymask)
tl.store(out_ptr0 + (x5 + (16*y4)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wz/cwzzscp4ql5kkrot2gvbemwwec2pnayjcrcdrmgn5smgevcasyrv.py
# Topologically Sorted Source Nodes: [s, s_1], Original ATen: [aten.div, aten.squeeze]
# Source node to ATen node mapping:
# s => div
# s_1 => squeeze
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_10, 1), kwargs = {})
# %squeeze : [num_users=1] = call_function[target=torch.ops.aten.squeeze.dim](args = (%div, 1), kwargs = {})
triton_poi_fused_div_squeeze_3 = async_compile.triton('triton_poi_fused_div_squeeze_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_squeeze_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_squeeze_3(in_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr1 + (y0 + (16*x2) + (64*y1)), tmp2, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4, 1, 1), (16, 16, 4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 16, 4), (64, 4, 1), 0), reinterpret_tensor(buf0, (1, 4, 16), (0, 16, 1), 0), out=buf1)
del buf0
buf2 = empty_strided_cuda((4, 4, 1, 4, 1, 4, 1), (64, 16, 16, 4, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_4, reinterpret_tensor(buf2, (4, 4, 16), (64, 16, 1), 0), out=buf3)
buf4 = reinterpret_tensor(buf2, (4, 4, 1, 4, 4, 1, 1), (64, 16, 16, 4, 1, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf3, buf4, 16, 16, grid=grid(16, 16), stream=stream0)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_3, reinterpret_tensor(buf4, (4, 4, 16), (64, 16, 1), 0), out=buf5)
buf7 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 4, 1, 16), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [s, s_1], Original ATen: [aten.div, aten.squeeze]
triton_poi_fused_div_squeeze_3.run(buf5, buf7, 64, 4, grid=grid(64, 4), stream=stream0)
del buf5
return (buf7, reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_4, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_2, (1, 4, 16), (64, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Triaffine(nn.Module):
"""
Triaffine layer for second-order scoring :cite:`zhang-etal-2020-efficient,wang-etal-2019-second`.
    This layer has a weight tensor :math:`W` and optional bias terms.
    The score :math:`s(x, y, z)` of the vector triple :math:`(x, y, z)` is computed as :math:`x^T z^T W y / d^s`,
    where :math:`d` is the vector dimension and :math:`s` is the scaling exponent.
:math:`x` and :math:`y` can be concatenated with bias terms.
Args:
n_in (int):
The size of the input feature.
n_out (int):
The number of output channels.
        scale (float):
            Exponent :math:`s` of the :math:`d^s` divisor applied to the scores. Default: 0.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``False``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``False``.
"""
def __init__(self, n_in, n_out=1, scale=0, bias_x=False, bias_y=False):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.scale = scale
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x, n_in,
n_in + bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}'
if self.n_out > 1:
s += f', n_out={self.n_out}'
if self.scale != 0:
s += f', scale={self.scale}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, x, y, z):
"""
Args:
x (torch.Tensor): ``[batch_size, seq_len, n_in]``.
y (torch.Tensor): ``[batch_size, seq_len, n_in]``.
z (torch.Tensor): ``[batch_size, seq_len, n_in]``.
Returns:
~torch.Tensor:
A scoring tensor of shape ``[batch_size, n_out, seq_len, seq_len, seq_len]``.
If ``n_out=1``, the dimension for ``n_out`` will be squeezed automatically.
"""
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
w = torch.einsum('bzk,oikj->bozij', z, self.weight)
s = torch.einsum('bxi,bozij,byj->bozxy', x, w, y
) / self.n_in ** self.scale
s = s.squeeze(1)
return s
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'n_in': 4}]
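if __name__ == '__main__':
    # Illustrative smoke test (not part of the original module), built on the
    # get_inputs() helper above: with the weight zero-initialized by
    # reset_parameters(), every triaffine score is zero.
    layer = Triaffine(n_in=4)
    s = layer(*get_inputs())
    assert s.shape == (4, 4, 4, 4) and not s.any()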
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
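# Permutes the (n_out=1, i, k, j) weight to contiguous (k, i, j) so the
# 'bzk,oikj->bozij' contraction of z with W runs as a single bmm over k.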
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
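# Rearranges the z.W intermediate to (batch, i, z, j) so the next bmm can
# contract x over i.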
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
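# Rearranges the partial result to (batch, j, z, x) so the final bmm can
# contract y over j.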
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 4
x3 = xindex // 4
y0 = yindex % 4
y1 = yindex // 4
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x3 + 16 * x2 + 64 * y1), xmask & ymask)
tl.store(out_ptr0 + (x5 + 16 * y4), tmp0, xmask & ymask)
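# Applies the scaling divisor (n_in ** scale == 1 here, since scale=0) and
# writes the squeezed (batch, z, x, y) score tensor in its strided layout.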
@triton.jit
def triton_poi_fused_div_squeeze_3(in_ptr0, out_ptr1, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr1 + (y0 + 16 * x2 + 64 * y1), tmp2, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4, 1, 1), (16, 16, 4, 1, 1, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((1, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 16, 4), (64, 4,
1), 0), reinterpret_tensor(buf0, (1, 4, 16), (0, 16, 1), 0),
out=buf1)
del buf0
buf2 = empty_strided_cuda((4, 4, 1, 4, 1, 4, 1), (64, 16, 16, 4, 4,
1, 1), torch.float32)
triton_poi_fused_clone_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0)
del buf1
extern_kernels.bmm(primals_4, reinterpret_tensor(buf2, (4, 4, 16),
(64, 16, 1), 0), out=buf3)
buf4 = reinterpret_tensor(buf2, (4, 4, 1, 4, 4, 1, 1), (64, 16, 16,
4, 1, 1, 1), 0)
del buf2
triton_poi_fused_clone_2[grid(16, 16)](buf3, buf4, 16, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
buf5 = buf3
del buf3
extern_kernels.bmm(primals_3, reinterpret_tensor(buf4, (4, 4, 16),
(64, 16, 1), 0), out=buf5)
buf7 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 4, 1, 16), 0)
del buf4
triton_poi_fused_div_squeeze_3[grid(64, 4)](buf5, buf7, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del buf5
return buf7, reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(primals_4, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(primals_2, (1, 4, 16), (64, 1, 4), 0)
class TriaffineNew(nn.Module):
"""
Triaffine layer for second-order scoring :cite:`zhang-etal-2020-efficient,wang-etal-2019-second`.
    This layer has a weight tensor :math:`W` and optional bias terms.
    The score :math:`s(x, y, z)` of the vector triple :math:`(x, y, z)` is computed as :math:`x^T z^T W y / d^s`,
    where :math:`d` is the vector dimension and :math:`s` is the scaling exponent.
:math:`x` and :math:`y` can be concatenated with bias terms.
Args:
n_in (int):
The size of the input feature.
n_out (int):
The number of output channels.
        scale (float):
            Exponent :math:`s` of the :math:`d^s` divisor applied to the scores. Default: 0.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``False``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``False``.
"""
def __init__(self, n_in, n_out=1, scale=0, bias_x=False, bias_y=False):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.scale = scale
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x, n_in,
n_in + bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}'
if self.n_out > 1:
s += f', n_out={self.n_out}'
if self.scale != 0:
s += f', scale={self.scale}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, input_0, input_1, input_2):
primals_1 = self.weight
        # In call(), primals_2 is contracted with the weight over k (the z
        # operand of 'bzk,oikj->bozij') and primals_4 over i (the x operand),
        # so z (input_2) maps to primals_2 and x (input_0) to primals_4.
        primals_2 = input_2
        primals_3 = input_1
        primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| yzhangcs/parser | Triaffine | false | 16,785 | [
"MIT"
] | 439 | 3abebde1c9fe0bf2e99adce845aaf2a04b194f8a | https://github.com/yzhangcs/parser/tree/3abebde1c9fe0bf2e99adce845aaf2a04b194f8a |
GlobalMaxPooling | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/2l/c2lm5wvy5varadxpp77k6lvi6yjwzernwi4uqg6gmabg2nygeeur.py
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
# Source node to ATen node mapping:
# max_1 => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%max_1, 0), kwargs = {})
triton_poi_fused_max_0 = async_compile.triton('triton_poi_fused_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
stream0 = get_raw_stream(0)
triton_poi_fused_max_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class GlobalMaxPooling(nn.Module):
def __init__(self):
super(GlobalMaxPooling, self).__init__()
def forward(self, x):
res, _ = torch.max(x, dim=1)
return res
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + x2, tmp6, xmask)
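# Semantics sketch (explanatory, not part of the generated output): for a
# contiguous (4, 4, 4, 4) input, dim=1 has stride 16, so the four loads
# above (offsets 0, 16, 32, 48 within each stride-64 batch) are
# x[b, 0:4, h, w], and the three pairwise triton_helpers.maximum calls
# reduce them to out[b, h, w] = max_c x[b, c, h, w].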
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class GlobalMaxPoolingNew(nn.Module):
def __init__(self):
super(GlobalMaxPoolingNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
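# Hypothetical sanity check (a minimal sketch, assumes a CUDA device and the
# exact (4, 4, 4, 4) contiguous shape asserted in call()):
#     x = torch.rand(4, 4, 4, 4, device='cuda')
#     ref, _ = torch.max(x, dim=1)
#     assert torch.allclose(GlobalMaxPoolingNew()(x), ref)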
| zake7749/Sequence-to-Sequence-101 | GlobalMaxPooling | false | 16,786 | [
"MIT"
] | 64 | f9e9a8e836dc1cb3b35d6e148f6378fcd2736951 | https://github.com/zake7749/Sequence-to-Sequence-101/tree/f9e9a8e836dc1cb3b35d6e148f6378fcd2736951 |
PositionalEmbedding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/am/camw2m64bebmwz2eb4qwmmjxa5upc3apbxzda4ulym6tnb423hbl.py
# Topologically Sorted Source Nodes: [new_tensor, long], Original ATen: [aten.lift_fresh, aten._to_copy]
# Source node to ATen node mapping:
# long => convert_element_type
# new_tensor => lift_fresh_copy
# Graph fragment:
# %lift_fresh_copy : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%_tensor_constant0,), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lift_fresh_copy, torch.int64), kwargs = {})
triton_poi_fused__to_copy_lift_fresh_0 = async_compile.triton('triton_poi_fused__to_copy_lift_fresh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_lift_fresh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_lift_fresh_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = 1.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tl.full([1], 3, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = 2.0
tmp11 = 3.0
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp7, tmp12)
tmp14 = tmp13.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp14, xmask)
''', device_str='cuda')
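# Note: the nested tl.where selects above materialize the constant position
# ids [0., 1., 2., 3.] directly on device (a binary-tree select on x0) and
# cast them to integer indices, so no host-side index tensor is copied in.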
# kernel path: runs/run_shard_0/inductor_cache/nb/cnb3kg2pxtnalk5p7pym3u3tddofdvgfyv2zsrfnaxmvow2vx2kx.py
# Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding]
# Source node to ATen node mapping:
# embedding => embedding
# Graph fragment:
# %embedding : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_2, %convert_element_type), kwargs = {})
triton_poi_fused_embedding_1 = async_compile.triton('triton_poi_fused_embedding_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_embedding_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_embedding_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = 1.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tl.full([1], 3, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = 2.0
tmp11 = 3.0
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp7, tmp12)
tmp14 = tmp13.to(tl.int32)
tmp15 = tl.load(in_ptr0 + (x0 + (4*tmp14)), xmask)
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1024, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [new_tensor, long], Original ATen: [aten.lift_fresh, aten._to_copy]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_lift_fresh_0.run(buf0, 4, grid=grid(4), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding]
triton_poi_fused_embedding_1.run(primals_2, buf1, 16, grid=grid(16), stream=stream0)
del primals_2
return (buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1024, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class PositionalEmbedding(nn.Module):
def __init__(self, n_model, max_len=1024):
super().__init__()
self.embed = nn.Embedding(max_len, n_model)
self.reset_parameters()
@torch.no_grad()
def reset_parameters(self):
w = self.embed.weight
max_len, n_model = w.shape
        w = w.new_tensor(range(max_len)).unsqueeze(-1) / 10000 ** (
            w.new_tensor(range(n_model)) // 2 * 2 / n_model)
w[:, 0::2], w[:, 1::2] = w[:, 0::2].sin(), w[:, 1::2].cos()
self.embed.weight.copy_(w)
def forward(self, x):
return self.embed(x.new_tensor(range(x.shape[1])).long())
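# Semantics sketch (explanatory): reset_parameters fills row p, column j of
# the table with sin(p / 10000**(j / n_model)) for even j and
# cos(p / 10000**((j - 1) / n_model)) for odd j; forward then returns the
# first x.shape[1] rows, i.e. the standard sinusoidal positional encoding
# for a length-L input.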
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_model': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_lift_fresh_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = 1.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tl.full([1], 3, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = 2.0
tmp11 = 3.0
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp7, tmp12)
tmp14 = tmp13.to(tl.int32)
tl.store(out_ptr0 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused_embedding_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = 1.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tl.full([1], 3, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = 2.0
tmp11 = 3.0
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp7, tmp12)
tmp14 = tmp13.to(tl.int32)
tmp15 = tl.load(in_ptr0 + (x0 + 4 * tmp14), xmask)
tl.store(out_ptr0 + x2, tmp15, xmask)
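# Note: triton_poi_fused_embedding_1 fuses the position-id computation (the
# same nested tl.where ladder as the kernel above) with the table gather
# in_ptr0[x0 + 4 * tmp14], so the embedding rows are read without routing
# the index tensor buf0 back through memory.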
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1024, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.int64)
get_raw_stream(0)
triton_poi_fused__to_copy_lift_fresh_0[grid(4)](buf0, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_embedding_1[grid(16)](primals_2, buf1, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf1, buf0
class PositionalEmbeddingNew(nn.Module):
def __init__(self, n_model, max_len=1024):
super().__init__()
self.embed = nn.Embedding(max_len, n_model)
self.reset_parameters()
@torch.no_grad()
def reset_parameters(self):
w = self.embed.weight
max_len, n_model = w.shape
        w = w.new_tensor(range(max_len)).unsqueeze(-1) / 10000 ** (
            w.new_tensor(range(n_model)) // 2 * 2 / n_model)
w[:, 0::2], w[:, 1::2] = w[:, 0::2].sin(), w[:, 1::2].cos()
self.embed.weight.copy_(w)
def forward(self, input_0):
primals_2 = self.embed.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
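# Hypothetical sanity check (a minimal sketch, assumes CUDA; PositionalEmbedding
# here refers to the eager module defined in the previous field): because
# reset_parameters writes the weight deterministically, both modules should agree:
#     x = torch.rand(4, 4, 4, 4, device='cuda')
#     m_eager, m_fused = PositionalEmbedding(4).cuda(), PositionalEmbeddingNew(4).cuda()
#     assert torch.allclose(m_eager(x), m_fused(x))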
| yzhangcs/parser | PositionalEmbedding | false | 16,787 | [
"MIT"
] | 439 | 3abebde1c9fe0bf2e99adce845aaf2a04b194f8a | https://github.com/yzhangcs/parser/tree/3abebde1c9fe0bf2e99adce845aaf2a04b194f8a |
KernelConv | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/vh/cvhjvjmkjscykhh3oon6hhydyzac3b4r5sl5nm5v47liggifjmju.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x4 = xindex
x2 = (xindex // 16)
x5 = xindex % 16
tmp0 = (-2) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-2) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-10) + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + (400*x2)), tmp11, xmask)
''', device_str='cuda')
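# Note on the stack kernels in this record: each triton_poi_fused_stack_N
# copies the 4x4 input plane shifted by one (row, col) offset in
# [-2, 2] x [-2, 2], masking out-of-bounds positions to 0.0 (zero padding),
# into its 16-element plane of a 25-slice stacked buffer (stride 400 =
# 25 * 16 per plane group). This realizes the aten.cat of 25 padded slices
# shown in the graph fragment, i.e. an unfold/im2col of the 5x5 neighbourhood.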
# kernel path: runs/run_shard_0/inductor_cache/5i/c5igh763ellxntjqakmflue7utlwnlaqs2p36qbokrbu5ptv7fh6.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_1 = async_compile.triton('triton_poi_fused_stack_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x4 = xindex
x2 = (xindex // 16)
x5 = xindex % 16
tmp0 = (-2) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-9) + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + (400*x2)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ub/cubyaval4kvueulzlsav6bkn53kyoadwwxwbculdhxummgc3s7in.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_2 = async_compile.triton('triton_poi_fused_stack_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x3 = xindex
x2 = (xindex // 16)
x4 = xindex % 16
tmp0 = (-2) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-8) + x3), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4 + (400*x2)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hw/chwhimjaxanebrtjkhzlt3p5e3zmcaelap3x24d4rqthpvzpgyhw.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_3 = async_compile.triton('triton_poi_fused_stack_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x4 = xindex
x2 = (xindex // 16)
x5 = xindex % 16
tmp0 = (-2) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-7) + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + (400*x2)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/we/cweirueafylqsjyjsmjusonct3lheuutcs6e2ycyrzgqvks3zmaw.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_4 = async_compile.triton('triton_poi_fused_stack_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x4 = xindex
x2 = (xindex // 16)
x5 = xindex % 16
tmp0 = (-2) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 2 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-6) + x4), tmp10 & xmask, other=0.0)
tmp12 = (-1) + x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = (-2) + x0
tmp16 = tmp15 >= tmp1
tmp17 = tmp15 < tmp3
tmp18 = tmp13 & tmp14
tmp19 = tmp18 & tmp16
tmp20 = tmp19 & tmp17
tmp21 = tl.load(in_ptr0 + ((-6) + x4), tmp20 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + (400*x2)), tmp11, xmask)
tl.store(out_ptr1 + (x5 + (400*x2)), tmp21, xmask)
''', device_str='cuda')
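# Note: this kernel writes two of the 25 slices at once. The two shift pairs
# it handles have linear load offsets that coincide at (-6 + x4), so the
# kernel reuses one load address and distinguishes the slices only by their
# bounds masks (tmp10 vs tmp20) before storing to the two output planes.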
# kernel path: runs/run_shard_0/inductor_cache/3c/c3cosbe4tpuve4ndt2f3iht6dfl4oeln3wygbqse2zxjua7px3oz.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_5 = async_compile.triton('triton_poi_fused_stack_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x4 = xindex
x2 = (xindex // 16)
x5 = xindex % 16
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-5) + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + (400*x2)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tx/ctxvrd522pbccrc44vv4sjiwys2h2oxqtao6d3m23n6kea2csx4a.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_6 = async_compile.triton('triton_poi_fused_stack_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x3 = xindex
x2 = (xindex // 16)
x4 = xindex % 16
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-4) + x3), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4 + (400*x2)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6z/c6zkulrxth4a6hxs6xst2q7kvkumo6lz2s6yvwl75fbpjky27woz.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_7 = async_compile.triton('triton_poi_fused_stack_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x4 = xindex
x2 = (xindex // 16)
x5 = xindex % 16
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-3) + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + (400*x2)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/zm/czm3onztoyyry2dkuqbz24jf53jseulzqebjfizdwsv3z5uhwtco.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_8 = async_compile.triton('triton_poi_fused_stack_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x4 = xindex
x2 = (xindex // 16)
x5 = xindex % 16
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 2 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-2) + x4), tmp10 & xmask, other=0.0)
tmp12 = x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = (-2) + x0
tmp16 = tmp15 >= tmp1
tmp17 = tmp15 < tmp3
tmp18 = tmp13 & tmp14
tmp19 = tmp18 & tmp16
tmp20 = tmp19 & tmp17
tmp21 = tl.load(in_ptr0 + ((-2) + x4), tmp20 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + (400*x2)), tmp11, xmask)
tl.store(out_ptr1 + (x5 + (400*x2)), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/af/cafaejewiqlgvr5gj4de4ywfsfsn7vuebs5vbtaejj33t6ikorer.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_9 = async_compile.triton('triton_poi_fused_stack_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x4 = xindex
x2 = (xindex // 16)
x5 = xindex % 16
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-1) + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + (400*x2)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/dm/cdmmzf65kmnp4hn23r3pawpputmm7a7vrgflavbmpms4bgy5q5v7.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_10 = async_compile.triton('triton_poi_fused_stack_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x3 = xindex
x2 = (xindex // 16)
x4 = xindex % 16
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (x3), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4 + (400*x2)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/db/cdbpbq37c4pc67m52xiv6sjqb7qxyja5y7pdogzus23w7gozjkzu.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_11 = async_compile.triton('triton_poi_fused_stack_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_11(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x4 = xindex
x2 = (xindex // 16)
x5 = xindex % 16
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (1 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + (400*x2)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/dj/cdjd4urnffvrwobgb7dfwjf3o52d3cprx7znchrm3v73o6craj3b.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_12 = async_compile.triton('triton_poi_fused_stack_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x4 = xindex
x2 = (xindex // 16)
x5 = xindex % 16
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 2 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (2 + x4), tmp10 & xmask, other=0.0)
tmp12 = 1 + x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = (-2) + x0
tmp16 = tmp15 >= tmp1
tmp17 = tmp15 < tmp3
tmp18 = tmp13 & tmp14
tmp19 = tmp18 & tmp16
tmp20 = tmp19 & tmp17
tmp21 = tl.load(in_ptr0 + (2 + x4), tmp20 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + (400*x2)), tmp11, xmask)
tl.store(out_ptr1 + (x5 + (400*x2)), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qp/cqpod4x4vnk52zv2f63fqtmbimvhszqzmxsyy75vmsizhwlh75lr.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_13 = async_compile.triton('triton_poi_fused_stack_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_13(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x4 = xindex
x2 = (xindex // 16)
x5 = xindex % 16
tmp0 = 1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (3 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + (400*x2)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6j/c6j5a6mlctrhm7kk5fvx5a2uogt7k77vsomilar6lydrnoaefj56.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_14 = async_compile.triton('triton_poi_fused_stack_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_14(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x3 = xindex
x2 = (xindex // 16)
x4 = xindex % 16
tmp0 = 1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (4 + x3), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4 + (400*x2)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/u4/cu44h6l7lmng3xsvhwg2n3f5o3cinq32vjsrpgxjhwzkfja2xtc2.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_15 = async_compile.triton('triton_poi_fused_stack_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_15(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x4 = xindex
x2 = (xindex // 16)
x5 = xindex % 16
tmp0 = 1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (5 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + (400*x2)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6m/c6mzcwsxepkf3gn6qfky2umtofsfljy6yf4kafqnpsowva3zh4u7.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_16 = async_compile.triton('triton_poi_fused_stack_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_16(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x4 = xindex
x2 = (xindex // 16)
x5 = xindex % 16
tmp0 = 1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 2 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (6 + x4), tmp10 & xmask, other=0.0)
tmp12 = 2 + x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = (-2) + x0
tmp16 = tmp15 >= tmp1
tmp17 = tmp15 < tmp3
tmp18 = tmp13 & tmp14
tmp19 = tmp18 & tmp16
tmp20 = tmp19 & tmp17
tmp21 = tl.load(in_ptr0 + (6 + x4), tmp20 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + (400*x2)), tmp11, xmask)
tl.store(out_ptr1 + (x5 + (400*x2)), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/p5/cp57dq2d3v4deoogeivzfek5bogjhulrc2cmjvlju4vkgoz24nhn.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_17 = async_compile.triton('triton_poi_fused_stack_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_17(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x4 = xindex
x2 = (xindex // 16)
x5 = xindex % 16
tmp0 = 2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (7 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + (400*x2)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/36/c36cz7axzp7lmf66pq7kkdckqkuedt2z6shwg5sxuhzoq5qaqahe.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_18 = async_compile.triton('triton_poi_fused_stack_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_18(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x3 = xindex
x2 = (xindex // 16)
x4 = xindex % 16
tmp0 = 2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (8 + x3), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4 + (400*x2)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/s4/cs4rzkg26cfxynqnjajcvewruuvnhhw4k7s5i72dhdmcugxhnkys.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_19 = async_compile.triton('triton_poi_fused_stack_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_19(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x4 = xindex
x2 = (xindex // 16)
x5 = xindex % 16
tmp0 = 2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (9 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + (400*x2)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hq/chqjwbtslcupilsmhs2qpbyabroisgjql6h46r6u35rkvzxcdzuw.py
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# img_stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_5, %slice_7, %slice_9, %slice_11, %slice_13, %slice_15, %slice_17, %slice_19, %slice_21, %slice_23, %slice_25, %slice_27, %slice_29, %slice_31, %slice_33, %slice_35, %slice_37, %slice_39, %slice_41, %slice_43, %slice_45, %slice_47, %slice_49, %slice_51, %slice_53], 2), kwargs = {})
triton_poi_fused_stack_20 = async_compile.triton('triton_poi_fused_stack_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_20(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x4 = xindex
x2 = (xindex // 16)
x5 = xindex % 16
tmp0 = 2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 2 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (10 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + (400*x2)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ul/culrjcb2tu3ujzix3apsqugds2givsg2rt6gj3uik7vpvra2almr.py
# Topologically Sorted Source Nodes: [mul, sum_1, pred_img_i_2], Original ATen: [aten.mul, aten.sum, aten.div]
# Source node to ATen node mapping:
# mul => mul
# pred_img_i_2 => div
# sum_1 => sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_3, %view_2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2]), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%squeeze, 1.0), kwargs = {})
triton_per_fused_div_mul_sum_21 = async_compile.triton('triton_per_fused_div_mul_sum_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 32],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_sum_21', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_mul_sum_21(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 256
rnumel = 25
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
x3 = xindex
r2 = rindex
x0 = xindex % 16
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + (16*r2) + (400*x1)), rmask & xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(rmask & xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = 1.0
tmp8 = tmp6 / tmp7
tmp9 = tmp8 * tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
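# Editorial note: triton_per_fused_div_mul_sum_21 fuses the kernel application
# step. It multiplies the predicted core (in_ptr0, broadcast over the kernel
# axis) with the 25-deep shifted image stack (in_ptr1) and reduces over
# rnumel=25, i.e. torch.sum(core[K].mul(img_stack), dim=2). The trailing
# `/ 1.0` and `* 1.0` appear to be the constant-folded mean over a single
# kernel size and the `pred_img_i / white_level` step with white_level=1.0.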
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf25 = empty_strided_cuda((4, 4, 25, 4, 4), (1600, 400, 16, 4, 1), torch.float32)
buf0 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 0) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
stream0 = get_raw_stream(0)
triton_poi_fused_stack_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
buf1 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 16) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
triton_poi_fused_stack_1.run(arg0_1, buf1, 256, grid=grid(256), stream=stream0)
buf2 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 32) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
triton_poi_fused_stack_2.run(arg0_1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 48) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
triton_poi_fused_stack_3.run(arg0_1, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 64) # alias
buf5 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 80) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
triton_poi_fused_stack_4.run(arg0_1, buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 96) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
triton_poi_fused_stack_5.run(arg0_1, buf6, 256, grid=grid(256), stream=stream0)
buf7 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 112) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
triton_poi_fused_stack_6.run(arg0_1, buf7, 256, grid=grid(256), stream=stream0)
buf8 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 128) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
triton_poi_fused_stack_7.run(arg0_1, buf8, 256, grid=grid(256), stream=stream0)
buf9 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 144) # alias
buf10 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 160) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
triton_poi_fused_stack_8.run(arg0_1, buf9, buf10, 256, grid=grid(256), stream=stream0)
buf11 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 176) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
triton_poi_fused_stack_9.run(arg0_1, buf11, 256, grid=grid(256), stream=stream0)
buf12 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 192) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
triton_poi_fused_stack_10.run(arg0_1, buf12, 256, grid=grid(256), stream=stream0)
buf13 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 208) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
triton_poi_fused_stack_11.run(arg0_1, buf13, 256, grid=grid(256), stream=stream0)
buf14 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 224) # alias
buf15 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 240) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
triton_poi_fused_stack_12.run(arg0_1, buf14, buf15, 256, grid=grid(256), stream=stream0)
buf16 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 256) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
triton_poi_fused_stack_13.run(arg0_1, buf16, 256, grid=grid(256), stream=stream0)
buf17 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 272) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
triton_poi_fused_stack_14.run(arg0_1, buf17, 256, grid=grid(256), stream=stream0)
buf18 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 288) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
triton_poi_fused_stack_15.run(arg0_1, buf18, 256, grid=grid(256), stream=stream0)
buf19 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 304) # alias
buf20 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 320) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
triton_poi_fused_stack_16.run(arg0_1, buf19, buf20, 256, grid=grid(256), stream=stream0)
buf21 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 336) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
triton_poi_fused_stack_17.run(arg0_1, buf21, 256, grid=grid(256), stream=stream0)
buf22 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 352) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
triton_poi_fused_stack_18.run(arg0_1, buf22, 256, grid=grid(256), stream=stream0)
buf23 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 368) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
triton_poi_fused_stack_19.run(arg0_1, buf23, 256, grid=grid(256), stream=stream0)
buf24 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4, 1), 384) # alias
# Topologically Sorted Source Nodes: [img_stack], Original ATen: [aten.stack]
triton_poi_fused_stack_20.run(arg0_1, buf24, 256, grid=grid(256), stream=stream0)
del arg0_1
buf26 = empty_strided_cuda((4, 4, 1, 4, 4), (64, 16, 16, 4, 1), torch.float32)
buf27 = reinterpret_tensor(buf26, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf26 # reuse
# Topologically Sorted Source Nodes: [mul, sum_1, pred_img_i_2], Original ATen: [aten.mul, aten.sum, aten.div]
triton_per_fused_div_mul_sum_21.run(buf27, arg1_1, buf25, 256, 25, grid=grid(256), stream=stream0)
del arg1_1
del buf0
del buf1
del buf10
del buf11
del buf12
del buf13
del buf14
del buf15
del buf16
del buf17
del buf18
del buf19
del buf2
del buf20
del buf21
del buf22
del buf23
del buf24
del buf25
del buf3
del buf4
del buf5
del buf6
del buf7
del buf8
del buf9
return (buf27, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
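# Editorial sketch (hedged; assumes a CUDA device) -- cross-checking the
# compiled graph against the eager module defined in the source below:
#
#   model = KernelConv().cuda()
#   frames, core = (t.cuda() for t in get_inputs())
#   ref = model(frames, core)
#   out, = call([frames, core])   # note: call() clears its argument list
#   torch.testing.assert_close(out, ref)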
| import torch
import torch.nn as nn
import torch.nn.functional as F
class KernelConv(nn.Module):
"""
the class of computing prediction
"""
def __init__(self, kernel_size=[5], sep_conv=False, core_bias=False):
super(KernelConv, self).__init__()
self.kernel_size = sorted(kernel_size)
self.sep_conv = sep_conv
self.core_bias = core_bias
def _sep_conv_core(self, core, batch_size, N, color, height, width):
"""
convert the sep_conv core to conv2d core
2p --> p^2
:param core: shape: batch*(N*2*K)*height*width
:return:
"""
kernel_total = sum(self.kernel_size)
core = core.view(batch_size, N, -1, color, height, width)
if not self.core_bias:
core_1, core_2 = torch.split(core, kernel_total, dim=2)
else:
core_1, core_2, core_3 = torch.split(core, kernel_total, dim=2)
core_out = {}
cur = 0
for K in self.kernel_size:
t1 = core_1[:, :, cur:cur + K, ...].view(batch_size, N, K, 1, 3,
height, width)
t2 = core_2[:, :, cur:cur + K, ...].view(batch_size, N, 1, K, 3,
height, width)
core_out[K] = torch.einsum('ijklno,ijlmno->ijkmno', [t1, t2]).view(
batch_size, N, K * K, color, height, width)
cur += K
return core_out, None if not self.core_bias else core_3.squeeze()
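    # Editorial note: the einsum above is intended as an outer product of the
    # two separable factors (t1 spans K x 1 and t2 spans 1 x K along the kernel
    # axes), expanding 2*K parameters into a K*K kernel. Note the equation has
    # six subscripts while t1/t2 are viewed with seven dims; this sep_conv path
    # is not exercised by this trace (sep_conv defaults to False).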
def _convert_dict(self, core, batch_size, N, color, height, width):
"""
make sure the core to be a dict, generally, only one kind of kernel size is suitable for the func.
:param core: shape: batch_size*(N*K*K)*height*width
:return: core_out, a dict
"""
core_out = {}
core = core.view(batch_size, N, -1, color, height, width)
core_out[self.kernel_size[0]] = core[:, :, 0:self.kernel_size[0] **
2, ...]
bias = None if not self.core_bias else core[:, :, -1, ...]
return core_out, bias
def forward(self, frames, core, white_level=1.0, rate=1):
"""
compute the pred image according to core and frames
:param frames: [batch_size, N, 3, height, width]
:param core: [batch_size, N, dict(kernel), 3, height, width]
:return:
"""
if len(frames.size()) == 5:
batch_size, N, color, height, width = frames.size()
else:
batch_size, N, height, width = frames.size()
color = 1
frames = frames.view(batch_size, N, color, height, width)
if self.sep_conv:
core, bias = self._sep_conv_core(core, batch_size, N, color,
height, width)
else:
core, bias = self._convert_dict(core, batch_size, N, color,
height, width)
img_stack = []
pred_img = []
kernel = self.kernel_size[::-1]
for index, K in enumerate(kernel):
if not img_stack:
padding_num = K // 2 * rate
frame_pad = F.pad(frames, [padding_num, padding_num,
padding_num, padding_num])
for i in range(0, K):
for j in range(0, K):
img_stack.append(frame_pad[..., i * rate:i * rate +
height, j * rate:j * rate + width])
img_stack = torch.stack(img_stack, dim=2)
else:
k_diff = (kernel[index - 1] - kernel[index]) // 2
img_stack = img_stack[:, :, k_diff:-k_diff, ...]
pred_img.append(torch.sum(core[K].mul(img_stack), dim=2,
keepdim=False))
pred_img = torch.stack(pred_img, dim=0)
pred_img_i = torch.mean(pred_img, dim=0, keepdim=False)
pred_img_i = pred_img_i.squeeze(2)
if self.core_bias:
if bias is None:
raise ValueError('The bias should not be None.')
pred_img_i += bias
pred_img_i = pred_img_i / white_level
return pred_img_i
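# Minimal usage sketch (editorial; mirrors get_inputs() below). With 4D inputs,
# forward() inserts a singleton color dim, and _convert_dict leaves a size-1
# kernel axis that broadcasts against the 25-deep image stack:
def _kernelconv_demo():
    model = KernelConv(kernel_size=[5])
    frames, core = get_inputs()    # two (4, 4, 4, 4) tensors
    return model(frames, core)     # predicted image, shape (4, 4, 4, 4)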
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = -2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -2 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-10 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = -2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-9 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x3 = xindex
x2 = xindex // 16
x4 = xindex % 16
tmp0 = -2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-8 + x3), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = -2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-7 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = -2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 2 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-6 + x4), tmp10 & xmask, other=0.0)
tmp12 = -1 + x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = -2 + x0
tmp16 = tmp15 >= tmp1
tmp17 = tmp15 < tmp3
tmp18 = tmp13 & tmp14
tmp19 = tmp18 & tmp16
tmp20 = tmp19 & tmp17
tmp21 = tl.load(in_ptr0 + (-6 + x4), tmp20 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
tl.store(out_ptr1 + (x5 + 400 * x2), tmp21, xmask)
@triton.jit
def triton_poi_fused_stack_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x3 = xindex
x2 = xindex // 16
x4 = xindex % 16
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-4 + x3), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-3 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 2 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-2 + x4), tmp10 & xmask, other=0.0)
tmp12 = x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = -2 + x0
tmp16 = tmp15 >= tmp1
tmp17 = tmp15 < tmp3
tmp18 = tmp13 & tmp14
tmp19 = tmp18 & tmp16
tmp20 = tmp19 & tmp17
tmp21 = tl.load(in_ptr0 + (-2 + x4), tmp20 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
tl.store(out_ptr1 + (x5 + 400 * x2), tmp21, xmask)
@triton.jit
def triton_poi_fused_stack_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-1 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x3 = xindex
x2 = xindex // 16
x4 = xindex % 16
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + x3, tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (1 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 2 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (2 + x4), tmp10 & xmask, other=0.0)
tmp12 = 1 + x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = -2 + x0
tmp16 = tmp15 >= tmp1
tmp17 = tmp15 < tmp3
tmp18 = tmp13 & tmp14
tmp19 = tmp18 & tmp16
tmp20 = tmp19 & tmp17
tmp21 = tl.load(in_ptr0 + (2 + x4), tmp20 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
tl.store(out_ptr1 + (x5 + 400 * x2), tmp21, xmask)
@triton.jit
def triton_poi_fused_stack_13(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = 1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (3 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_14(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x3 = xindex
x2 = xindex // 16
x4 = xindex % 16
tmp0 = 1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (4 + x3), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_15(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = 1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (5 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_16(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = 1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 2 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (6 + x4), tmp10 & xmask, other=0.0)
tmp12 = 2 + x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = -2 + x0
tmp16 = tmp15 >= tmp1
tmp17 = tmp15 < tmp3
tmp18 = tmp13 & tmp14
tmp19 = tmp18 & tmp16
tmp20 = tmp19 & tmp17
tmp21 = tl.load(in_ptr0 + (6 + x4), tmp20 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
tl.store(out_ptr1 + (x5 + 400 * x2), tmp21, xmask)
@triton.jit
def triton_poi_fused_stack_17(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = 2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (7 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_18(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x3 = xindex
x2 = xindex // 16
x4 = xindex % 16
tmp0 = 2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (8 + x3), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_19(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = 2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (9 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_stack_20(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
x2 = xindex // 16
x5 = xindex % 16
tmp0 = 2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 2 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (10 + x4), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x5 + 400 * x2), tmp11, xmask)
@triton.jit
def triton_per_fused_div_mul_sum_21(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 256
rnumel = 25
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
x3 = xindex
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 16 * r2 + 400 * x1), rmask & xmask,
other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(rmask & xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = 1.0
tmp8 = tmp6 / tmp7
tmp9 = tmp8 * tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp9, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf25 = empty_strided_cuda((4, 4, 25, 4, 4), (1600, 400, 16, 4, 1),
torch.float32)
buf0 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4,
1), 0)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf1 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4,
1), 16)
triton_poi_fused_stack_1[grid(256)](arg0_1, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4,
1), 32)
triton_poi_fused_stack_2[grid(256)](arg0_1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4,
1), 48)
triton_poi_fused_stack_3[grid(256)](arg0_1, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4,
1), 64)
buf5 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4,
1), 80)
triton_poi_fused_stack_4[grid(256)](arg0_1, buf4, buf5, 256, XBLOCK
=128, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4,
1), 96)
triton_poi_fused_stack_5[grid(256)](arg0_1, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4,
1), 112)
triton_poi_fused_stack_6[grid(256)](arg0_1, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4,
1), 128)
triton_poi_fused_stack_7[grid(256)](arg0_1, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16, 4,
1), 144)
buf10 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 160)
triton_poi_fused_stack_8[grid(256)](arg0_1, buf9, buf10, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf11 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 176)
triton_poi_fused_stack_9[grid(256)](arg0_1, buf11, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf12 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 192)
triton_poi_fused_stack_10[grid(256)](arg0_1, buf12, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf13 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 208)
triton_poi_fused_stack_11[grid(256)](arg0_1, buf13, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf14 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 224)
buf15 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 240)
triton_poi_fused_stack_12[grid(256)](arg0_1, buf14, buf15, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf16 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 256)
triton_poi_fused_stack_13[grid(256)](arg0_1, buf16, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf17 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 272)
triton_poi_fused_stack_14[grid(256)](arg0_1, buf17, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf18 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 288)
triton_poi_fused_stack_15[grid(256)](arg0_1, buf18, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf19 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 304)
buf20 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 320)
triton_poi_fused_stack_16[grid(256)](arg0_1, buf19, buf20, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf21 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 336)
triton_poi_fused_stack_17[grid(256)](arg0_1, buf21, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf22 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 352)
triton_poi_fused_stack_18[grid(256)](arg0_1, buf22, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf23 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 368)
triton_poi_fused_stack_19[grid(256)](arg0_1, buf23, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf24 = reinterpret_tensor(buf25, (4, 4, 1, 4, 4), (1600, 400, 16,
4, 1), 384)
triton_poi_fused_stack_20[grid(256)](arg0_1, buf24, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf26 = empty_strided_cuda((4, 4, 1, 4, 4), (64, 16, 16, 4, 1),
torch.float32)
buf27 = reinterpret_tensor(buf26, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf26
triton_per_fused_div_mul_sum_21[grid(256)](buf27, arg1_1, buf25,
256, 25, XBLOCK=8, num_warps=2, num_stages=1)
del arg1_1
del buf0
del buf1
del buf10
del buf11
del buf12
del buf13
del buf14
del buf15
del buf16
del buf17
del buf18
del buf19
del buf2
del buf20
del buf21
del buf22
del buf23
del buf24
del buf25
del buf3
del buf4
del buf5
del buf6
del buf7
del buf8
del buf9
return buf27,
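# Editorial note: buf25 is a single (4, 4, 25, 4, 4) buffer, and each stack
# kernel writes through a reinterpret_tensor alias at offsets 0, 16, ..., 384
# (the stride of the kernel axis is 16). Inductor thereby realizes torch.stack
# in place, without a separate concatenation copy.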
class KernelConvNew(nn.Module):
"""
the class of computing prediction
"""
def __init__(self, kernel_size=[5], sep_conv=False, core_bias=False):
super(KernelConvNew, self).__init__()
self.kernel_size = sorted(kernel_size)
self.sep_conv = sep_conv
self.core_bias = core_bias
def _sep_conv_core(self, core, batch_size, N, color, height, width):
"""
convert the sep_conv core to conv2d core
2p --> p^2
:param core: shape: batch*(N*2*K)*height*width
:return:
"""
kernel_total = sum(self.kernel_size)
core = core.view(batch_size, N, -1, color, height, width)
if not self.core_bias:
core_1, core_2 = torch.split(core, kernel_total, dim=2)
else:
core_1, core_2, core_3 = torch.split(core, kernel_total, dim=2)
core_out = {}
cur = 0
for K in self.kernel_size:
t1 = core_1[:, :, cur:cur + K, ...].view(batch_size, N, K, 1, 3,
height, width)
t2 = core_2[:, :, cur:cur + K, ...].view(batch_size, N, 1, K, 3,
height, width)
core_out[K] = torch.einsum('ijklno,ijlmno->ijkmno', [t1, t2]).view(
batch_size, N, K * K, color, height, width)
cur += K
return core_out, None if not self.core_bias else core_3.squeeze()
def _convert_dict(self, core, batch_size, N, color, height, width):
"""
make sure the core to be a dict, generally, only one kind of kernel size is suitable for the func.
:param core: shape: batch_size*(N*K*K)*height*width
:return: core_out, a dict
"""
core_out = {}
core = core.view(batch_size, N, -1, color, height, width)
core_out[self.kernel_size[0]] = core[:, :, 0:self.kernel_size[0] **
2, ...]
bias = None if not self.core_bias else core[:, :, -1, ...]
return core_out, bias
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
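# Editorial note: KernelConvNew keeps the eager helper methods but routes
# forward() through the Inductor-generated call(), so it only accepts the
# exact (4, 4, 4, 4) CUDA inputs asserted by assert_size_stride above, unlike
# the shape-generic eager KernelConv.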
| xenbaloch/efficientderain | KernelConv | false | 16,788 | ["MIT"] | 109 | d5646815fd14a5a03c859102ecd2f298db7e53be | https://github.com/xenbaloch/efficientderain/tree/d5646815fd14a5a03c859102ecd2f298db7e53be |
SinusoidPositionalEmbedding | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/g5/cg5yrkcqk7zh235kxjif5f3n64hkhyvzjfk4mwrcxbhnogmjbemy.py
# Topologically Sorted Source Nodes: [new_tensor_1, floordiv, mul, truediv, pow_1, pos], Original ATen: [aten.lift_fresh, aten.floor_divide, aten.mul, aten.div, aten.pow]
# Source node to ATen node mapping:
# floordiv => div
# mul => mul
# new_tensor_1 => lift_fresh_copy_1
# pos => div_2
# pow_1 => pow_1
# truediv => div_1
# Graph fragment:
# %lift_fresh_copy_1 : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%_tensor_constant1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor_mode](args = (%lift_fresh_copy_1, 2), kwargs = {rounding_mode: floor})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 2), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 4), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Scalar](args = (10000, %div_1), kwargs = {})
# %div_2 : [num_users=4] = call_function[target=torch.ops.aten.div.Tensor](args = (%unsqueeze, %pow_1), kwargs = {})
triton_poi_fused_div_floor_divide_lift_fresh_mul_pow_0 = async_compile.triton('triton_poi_fused_div_floor_divide_lift_fresh_mul_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_floor_divide_lift_fresh_mul_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_floor_divide_lift_fresh_mul_pow_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = 1.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tl.full([1], 3, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = 2.0
tmp11 = 3.0
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp7, tmp12)
tmp14 = x0
tmp15 = tmp14 < tmp1
tmp16 = tmp14 < tmp3
tmp17 = tl.where(tmp16, tmp5, tmp6)
tmp18 = tmp14 < tmp8
tmp19 = tl.where(tmp18, tmp10, tmp11)
tmp20 = tl.where(tmp15, tmp17, tmp19)
tmp21 = 0.5
tmp22 = tmp20 * tmp21
tmp23 = libdevice.floor(tmp22)
tmp24 = tmp23 * tmp10
tmp25 = 0.25
tmp26 = tmp24 * tmp25
tmp27 = 10000.0
tmp28 = libdevice.pow(tmp27, tmp26)
tmp29 = tmp13 / tmp28
tl.store(out_ptr0 + (x2), tmp29, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2v/c2vrviarz5nkwdi5med2ekycd2idji2exdkqj4jty3yzydtnxr7h.py
# Topologically Sorted Source Nodes: [sin, setitem, cos, setitem_1], Original ATen: [aten.sin, aten.copy, aten.cos]
# Source node to ATen node mapping:
# cos => cos
# setitem => copy
# setitem_1 => copy_1
# sin => sin
# Graph fragment:
# %sin : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%slice_2,), kwargs = {})
# %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_6, %sin), kwargs = {})
# %slice_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%div_2, %copy, 1, 0, 9223372036854775807, 2), kwargs = {})
# %cos : [num_users=1] = call_function[target=torch.ops.aten.cos.default](args = (%slice_4,), kwargs = {})
# %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_13, %cos), kwargs = {})
# %slice_scatter_default_1 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default, %copy_1, 1, 1, 9223372036854775807, 2), kwargs = {})
triton_poi_fused_copy_cos_sin_1 = async_compile.triton('triton_poi_fused_copy_cos_sin_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_copy_cos_sin_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_copy_cos_sin_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = x2 % 2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 == tmp1
tmp3 = tl.load(in_ptr0 + ((2*(x0 // 2)) + (4*x1)), tmp2 & xmask, eviction_policy='evict_last', other=0.0)
tmp4 = tl_math.sin(tmp3)
tmp5 = tl.full(tmp4.shape, 0.0, tmp4.dtype)
tmp6 = tl.where(tmp2, tmp4, tmp5)
tmp7 = x1
tmp8 = tl.full([1], 2, tl.int64)
tmp9 = tmp7 < tmp8
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp7 < tmp10
tmp12 = 0.0
tmp13 = 1.0
tmp14 = tl.where(tmp11, tmp12, tmp13)
tmp15 = tl.full([1], 3, tl.int64)
tmp16 = tmp7 < tmp15
tmp17 = 2.0
tmp18 = 3.0
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp9, tmp14, tmp19)
tmp21 = x0
tmp22 = tmp21 < tmp8
tmp23 = tmp21 < tmp10
tmp24 = tl.where(tmp23, tmp12, tmp13)
tmp25 = tmp21 < tmp15
tmp26 = tl.where(tmp25, tmp17, tmp18)
tmp27 = tl.where(tmp22, tmp24, tmp26)
tmp28 = 0.5
tmp29 = tmp27 * tmp28
tmp30 = libdevice.floor(tmp29)
tmp31 = tmp30 * tmp17
tmp32 = 0.25
tmp33 = tmp31 * tmp32
tmp34 = 10000.0
tmp35 = libdevice.pow(tmp34, tmp33)
tmp36 = tmp20 / tmp35
tmp37 = tl.where(tmp2, tmp6, tmp36)
tmp38 = tmp21 >= tmp10
tmp39 = ((-1) + x0) % 2
tmp40 = tmp39 == tmp1
tmp41 = tmp38 & tmp40
tmp42 = tl.load(in_ptr0 + (1 + (2*(triton_helpers.div_floor_integer((-1) + x0, 2))) + (4*x1)), tmp41 & xmask, eviction_policy='evict_last', other=0.0)
tmp43 = tl_math.cos(tmp42)
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp41, tmp43, tmp44)
tmp46 = tl.where(tmp41, tmp45, tmp37)
tl.store(in_out_ptr0 + (x2), tmp46, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [new_tensor_1, floordiv, mul, truediv, pow_1, pos], Original ATen: [aten.lift_fresh, aten.floor_divide, aten.mul, aten.div, aten.pow]
stream0 = get_raw_stream(0)
triton_poi_fused_div_floor_divide_lift_fresh_mul_pow_0.run(buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sin, setitem, cos, setitem_1], Original ATen: [aten.sin, aten.copy, aten.cos]
triton_poi_fused_copy_cos_sin_1.run(buf2, buf0, 16, grid=grid(16), stream=stream0)
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SinusoidPositionalEmbedding(nn.Module):
def forward(self, x):
seq_len, n_model = x[0].shape
        pos = x.new_tensor(range(seq_len)).unsqueeze(-1)
        pos = pos / 10000 ** (x.new_tensor(range(n_model)) // 2 * 2 / n_model)
pos[:, 0::2], pos[:, 1::2] = pos[:, 0::2].sin(), pos[:, 1::2].cos()
return pos
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
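# Usage sketch (added for illustration; not part of the original source).
# The forward pass builds the standard sinusoid table
#   PE[i, 2k]   = sin(i / 10000 ** (2k / n_model))
#   PE[i, 2k+1] = cos(i / 10000 ** (2k / n_model))
# independent of the input values; only x's shape, device, and dtype are used.
def _example_usage():
    x = torch.rand([4, 4, 4])               # (batch, seq_len, n_model)
    pe = SinusoidPositionalEmbedding()(x)   # -> (seq_len, n_model) == (4, 4)
    assert pe.shape == (4, 4)
    return pe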
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_floor_divide_lift_fresh_mul_pow_0(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = 1.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tl.full([1], 3, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = 2.0
tmp11 = 3.0
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp7, tmp12)
tmp14 = x0
tmp15 = tmp14 < tmp1
tmp16 = tmp14 < tmp3
tmp17 = tl.where(tmp16, tmp5, tmp6)
tmp18 = tmp14 < tmp8
tmp19 = tl.where(tmp18, tmp10, tmp11)
tmp20 = tl.where(tmp15, tmp17, tmp19)
tmp21 = 0.5
tmp22 = tmp20 * tmp21
tmp23 = libdevice.floor(tmp22)
tmp24 = tmp23 * tmp10
tmp25 = 0.25
tmp26 = tmp24 * tmp25
tmp27 = 10000.0
tmp28 = libdevice.pow(tmp27, tmp26)
tmp29 = tmp13 / tmp28
tl.store(out_ptr0 + x2, tmp29, xmask)
@triton.jit
def triton_poi_fused_copy_cos_sin_1(in_out_ptr0, in_ptr0, xnumel,
                                    XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = x2 % 2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 == tmp1
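    # x2 % 2 == 0 picks even feature channels (the row stride 4 is even),
    # which receive sin; odd channels are overwritten with cos further down.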
tmp3 = tl.load(in_ptr0 + (2 * (x0 // 2) + 4 * x1), tmp2 & xmask,
eviction_policy='evict_last', other=0.0)
tmp4 = tl_math.sin(tmp3)
tmp5 = tl.full(tmp4.shape, 0.0, tmp4.dtype)
tmp6 = tl.where(tmp2, tmp4, tmp5)
tmp7 = x1
tmp8 = tl.full([1], 2, tl.int64)
tmp9 = tmp7 < tmp8
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp7 < tmp10
tmp12 = 0.0
tmp13 = 1.0
tmp14 = tl.where(tmp11, tmp12, tmp13)
tmp15 = tl.full([1], 3, tl.int64)
tmp16 = tmp7 < tmp15
tmp17 = 2.0
tmp18 = 3.0
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp9, tmp14, tmp19)
tmp21 = x0
tmp22 = tmp21 < tmp8
tmp23 = tmp21 < tmp10
tmp24 = tl.where(tmp23, tmp12, tmp13)
tmp25 = tmp21 < tmp15
tmp26 = tl.where(tmp25, tmp17, tmp18)
tmp27 = tl.where(tmp22, tmp24, tmp26)
tmp28 = 0.5
tmp29 = tmp27 * tmp28
tmp30 = libdevice.floor(tmp29)
tmp31 = tmp30 * tmp17
tmp32 = 0.25
tmp33 = tmp31 * tmp32
tmp34 = 10000.0
tmp35 = libdevice.pow(tmp34, tmp33)
tmp36 = tmp20 / tmp35
tmp37 = tl.where(tmp2, tmp6, tmp36)
tmp38 = tmp21 >= tmp10
tmp39 = (-1 + x0) % 2
tmp40 = tmp39 == tmp1
tmp41 = tmp38 & tmp40
    tmp42 = tl.load(
        in_ptr0 + (1 + 2 * triton_helpers.div_floor_integer(-1 + x0, 2) + 4 * x1),
        tmp41 & xmask, eviction_policy='evict_last', other=0.0)
tmp43 = tl_math.cos(tmp42)
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp41, tmp43, tmp44)
tmp46 = tl.where(tmp41, tmp45, tmp37)
tl.store(in_out_ptr0 + x2, tmp46, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_floor_divide_lift_fresh_mul_pow_0[grid(16)](buf0,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = buf1
del buf1
triton_poi_fused_copy_cos_sin_1[grid(16)](buf2, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
return buf2,
class SinusoidPositionalEmbeddingNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
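# Usage sketch (added): `call` is shape-specialized to a (4, 4, 4) input, as
# asserted by assert_size_stride above, and launches Triton kernels, so this
# assumes a CUDA device is available.
def _example_compiled_usage():
    x = torch.rand([4, 4, 4], device='cuda')
    pe = SinusoidPositionalEmbeddingNew()(x)   # -> (4, 4) positional table
    return pe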
| yzhangcs/parser | SinusoidPositionalEmbedding | false | 16,789 | ["MIT"] | 439 | 3abebde1c9fe0bf2e99adce845aaf2a04b194f8a | https://github.com/yzhangcs/parser/tree/3abebde1c9fe0bf2e99adce845aaf2a04b194f8a |
OfflineTripletLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/b4/cb47btozyurbtsx2wjuhvhiwfljsy4asgihwjpx7dw54ecfcg5ga.py
# Topologically Sorted Source Nodes: [sub, pow_1, distance_positive, sub_1, pow_2, distance_negative, sub_2, add, losses, mean], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add, aten.relu, aten.mean]
# Source node to ATen node mapping:
# add => add
# distance_negative => sum_2
# distance_positive => sum_1
# losses => relu
# mean => mean
# pow_1 => pow_1
# pow_2 => pow_2
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %view_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %view_2), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_2, [1]), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, %sum_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, 0.1), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%relu,), kwargs = {})
triton_per_fused_add_mean_pow_relu_sub_sum_0 = async_compile.triton('triton_per_fused_add_mean_pow_relu_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 2],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_pow_relu_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_pow_relu_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 2
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2 = tl.load(in_ptr0 + (2 + r0), None)
tmp8 = tl.broadcast_to(tmp0, [XBLOCK, 1])
tmp9 = tl.load(in_ptr0 + (1))
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, 1])
tmp3 = tmp1 - tmp2
tmp4 = tmp3 * tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.sum(tmp5, 1)[:, None]
tmp11 = tmp8 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp12 - tmp7
tmp14 = 0.1
tmp15 = tmp13 + tmp14
tmp16 = tl.full([1, 1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tmp18 = 1.0
tmp19 = tmp17 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp19, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, ), (1, ), torch.float32)
buf1 = reinterpret_tensor(buf0, (), (), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, distance_positive, sub_1, pow_2, distance_negative, sub_2, add, losses, mean], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add, aten.relu, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_pow_relu_sub_sum_0.run(buf1, arg0_1, 1, 2, grid=grid(1), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
from torch import nn
class OfflineTripletLoss(nn.Module):
"""
Triplet loss
Takes embeddings of an anchor sample, a positive sample and a negative sample
"""
def __init__(self, margin=0.1):
super(OfflineTripletLoss, self).__init__()
self.margin = margin
def forward(self, inputs, size_average=True):
batchsize = inputs[0].size(0)
anchor = inputs[0][0:int(batchsize / 3)]
positive = inputs[0][int(batchsize / 3):int(batchsize * 2 / 3)]
negative = inputs[0][int(batchsize * 2 / 3):]
anchor = anchor.view(int(batchsize / 3), -1)
positive = positive.view(int(batchsize / 3), -1)
negative = negative.view(int(batchsize / 3), -1)
distance_positive = (anchor - positive).pow(2).sum(1)
distance_negative = (anchor - negative).pow(2).sum(1)
losses = F.relu(distance_positive - distance_negative + self.margin)
return losses.mean() if size_average else losses.sum()
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
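# Usage sketch (added; not part of the original source). The batch is split
# into thirds along dim 0: [anchor | positive | negative], and the loss is
# mean(relu(||a - p||^2 - ||a - n||^2 + margin)).
def _example_usage():
    criterion = OfflineTripletLoss(margin=0.1)
    embeddings = torch.rand([6, 8])   # batch of 6 -> 2 triplets of dim 8
    loss = criterion([embeddings])    # forward reads inputs[0]
    return loss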
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_mean_pow_relu_sub_sum_0(in_out_ptr0, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
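    # NOTE (added): the bare expressions below compute indices/masks whose
    # results are unused here; in the generated kernel above they were bound
    # to xindex/xmask/rmask.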
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2 = tl.load(in_ptr0 + (2 + r0), None)
tmp8 = tl.broadcast_to(tmp0, [XBLOCK, 1])
tmp9 = tl.load(in_ptr0 + 1)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, 1])
tmp3 = tmp1 - tmp2
tmp4 = tmp3 * tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.sum(tmp5, 1)[:, None]
tmp11 = tmp8 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp12 - tmp7
tmp14 = 0.1
tmp15 = tmp13 + tmp14
tmp16 = tl.full([1, 1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tmp18 = 1.0
tmp19 = tmp17 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp19, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1,), (1,), torch.float32)
buf1 = reinterpret_tensor(buf0, (), (), 0)
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_pow_relu_sub_sum_0[grid(1)](buf1, arg0_1,
1, 2, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class OfflineTripletLossNew(nn.Module):
"""
Triplet loss
Takes embeddings of an anchor sample, a positive sample and a negative sample
"""
def __init__(self, margin=0.1):
super(OfflineTripletLossNew, self).__init__()
self.margin = margin
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
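# Usage sketch (added): the compiled path is specialized to a (4, 4) CUDA
# input (see assert_size_stride in `call`) and takes the tensor directly
# rather than wrapped in a list.
def _example_compiled_usage():
    embeddings = torch.rand([4, 4], device='cuda')
    return OfflineTripletLossNew()(embeddings)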
| zhangxinyu-tj/PAST | OfflineTripletLoss | false | 16,790 | ["MIT"] | 112 | 67f1f7a780e869aa7867167538edb03faa96dec5 | https://github.com/zhangxinyu-tj/PAST/tree/67f1f7a780e869aa7867167538edb03faa96dec5 |
SinusoidRelativePositionalEmbedding | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/lv/clvu7pznrzf5lgv6pv6scrzijwgmfhlsmenbnh3mhuznapdi2xtu.py
# Topologically Sorted Source Nodes: [new_tensor_1, floordiv, mul, truediv, pow_1, pos_1], Original ATen: [aten.lift_fresh, aten.floor_divide, aten.mul, aten.div, aten.pow]
# Source node to ATen node mapping:
# floordiv => div
# mul => mul
# new_tensor_1 => lift_fresh_copy_1
# pos_1 => div_2
# pow_1 => pow_1
# truediv => div_1
# Graph fragment:
# %lift_fresh_copy_1 : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%_tensor_constant1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor_mode](args = (%lift_fresh_copy_1, 2), kwargs = {rounding_mode: floor})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 2), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 4), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Scalar](args = (10000, %div_1), kwargs = {})
# %div_2 : [num_users=4] = call_function[target=torch.ops.aten.div.Tensor](args = (%unsqueeze_1, %pow_1), kwargs = {})
triton_poi_fused_div_floor_divide_lift_fresh_mul_pow_0 = async_compile.triton('triton_poi_fused_div_floor_divide_lift_fresh_mul_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_floor_divide_lift_fresh_mul_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_floor_divide_lift_fresh_mul_pow_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x0 = xindex % 4
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = 1.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tl.full([1], 3, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = 2.0
tmp11 = 3.0
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp7, tmp12)
tmp14 = x2
tmp15 = tmp14 < tmp1
tmp16 = tmp14 < tmp3
tmp17 = tl.where(tmp16, tmp5, tmp6)
tmp18 = tmp14 < tmp8
tmp19 = tl.where(tmp18, tmp10, tmp11)
tmp20 = tl.where(tmp15, tmp17, tmp19)
tmp21 = tmp13 - tmp20
tmp22 = x0
tmp23 = tmp22 < tmp1
tmp24 = tmp22 < tmp3
tmp25 = tl.where(tmp24, tmp5, tmp6)
tmp26 = tmp22 < tmp8
tmp27 = tl.where(tmp26, tmp10, tmp11)
tmp28 = tl.where(tmp23, tmp25, tmp27)
tmp29 = 0.5
tmp30 = tmp28 * tmp29
tmp31 = libdevice.floor(tmp30)
tmp32 = tmp31 * tmp10
tmp33 = 0.25
tmp34 = tmp32 * tmp33
tmp35 = 10000.0
tmp36 = libdevice.pow(tmp35, tmp34)
tmp37 = tmp21 / tmp36
tl.store(out_ptr0 + (x3), tmp37, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/d7/cd7q6adm5uhth6keobcit236ml7jdro7azjcbnyr3v47kmfaikth.py
# Topologically Sorted Source Nodes: [sin, setitem, cos, setitem_1], Original ATen: [aten.sin, aten.copy, aten.cos]
# Source node to ATen node mapping:
# cos => cos
# setitem => copy
# setitem_1 => copy_1
# sin => sin
# Graph fragment:
# %sin : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%slice_1,), kwargs = {})
# %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_3, %sin), kwargs = {})
# %slice_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%div_2, %copy, 2, 0, 9223372036854775807, 2), kwargs = {})
# %cos : [num_users=1] = call_function[target=torch.ops.aten.cos.default](args = (%slice_2,), kwargs = {})
# %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_6, %cos), kwargs = {})
# %slice_scatter_default_1 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default, %copy_1, 2, 1, 9223372036854775807, 2), kwargs = {})
triton_poi_fused_copy_cos_sin_1 = async_compile.triton('triton_poi_fused_copy_cos_sin_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_copy_cos_sin_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_copy_cos_sin_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x3 = (xindex // 4)
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
tmp0 = x4 % 2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 == tmp1
tmp3 = tl.load(in_ptr0 + ((2*(x0 // 2)) + (4*x3)), tmp2 & xmask, eviction_policy='evict_last', other=0.0)
tmp4 = tl_math.sin(tmp3)
tmp5 = tl.full(tmp4.shape, 0.0, tmp4.dtype)
tmp6 = tl.where(tmp2, tmp4, tmp5)
tmp7 = x1
tmp8 = tl.full([1], 2, tl.int64)
tmp9 = tmp7 < tmp8
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp7 < tmp10
tmp12 = 0.0
tmp13 = 1.0
tmp14 = tl.where(tmp11, tmp12, tmp13)
tmp15 = tl.full([1], 3, tl.int64)
tmp16 = tmp7 < tmp15
tmp17 = 2.0
tmp18 = 3.0
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp9, tmp14, tmp19)
tmp21 = x2
tmp22 = tmp21 < tmp8
tmp23 = tmp21 < tmp10
tmp24 = tl.where(tmp23, tmp12, tmp13)
tmp25 = tmp21 < tmp15
tmp26 = tl.where(tmp25, tmp17, tmp18)
tmp27 = tl.where(tmp22, tmp24, tmp26)
tmp28 = tmp20 - tmp27
tmp29 = x0
tmp30 = tmp29 < tmp8
tmp31 = tmp29 < tmp10
tmp32 = tl.where(tmp31, tmp12, tmp13)
tmp33 = tmp29 < tmp15
tmp34 = tl.where(tmp33, tmp17, tmp18)
tmp35 = tl.where(tmp30, tmp32, tmp34)
tmp36 = 0.5
tmp37 = tmp35 * tmp36
tmp38 = libdevice.floor(tmp37)
tmp39 = tmp38 * tmp17
tmp40 = 0.25
tmp41 = tmp39 * tmp40
tmp42 = 10000.0
tmp43 = libdevice.pow(tmp42, tmp41)
tmp44 = tmp28 / tmp43
tmp45 = tl.where(tmp2, tmp6, tmp44)
tmp46 = tmp29 >= tmp10
tmp47 = ((-1) + x0) % 2
tmp48 = tmp47 == tmp1
tmp49 = tmp46 & tmp48
tmp50 = tl.load(in_ptr0 + (1 + (2*(triton_helpers.div_floor_integer((-1) + x0, 2))) + (4*x3)), tmp49 & xmask, eviction_policy='evict_last', other=0.0)
tmp51 = tl_math.cos(tmp50)
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp49, tmp51, tmp52)
tmp54 = tl.where(tmp49, tmp53, tmp45)
tl.store(in_out_ptr0 + (x4), tmp54, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [new_tensor_1, floordiv, mul, truediv, pow_1, pos_1], Original ATen: [aten.lift_fresh, aten.floor_divide, aten.mul, aten.div, aten.pow]
stream0 = get_raw_stream(0)
triton_poi_fused_div_floor_divide_lift_fresh_mul_pow_0.run(buf0, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sin, setitem, cos, setitem_1], Original ATen: [aten.sin, aten.copy, aten.cos]
triton_poi_fused_copy_cos_sin_1.run(buf2, buf0, 64, grid=grid(64), stream=stream0)
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SinusoidRelativePositionalEmbedding(nn.Module):
def forward(self, x):
seq_len, n_model = x[0].shape
pos = x.new_tensor(range(seq_len))
        pos = (pos - pos.unsqueeze(-1)).unsqueeze(-1)
        pos = pos / 10000 ** (x.new_tensor(range(n_model)) // 2 * 2 / n_model)
        pos[..., 0::2], pos[..., 1::2] = pos[..., 0::2].sin(), pos[..., 1::2].cos()
return pos
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
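# Usage sketch (added; not part of the original source). The result is a
# (seq_len, seq_len, n_model) table where pos[i, j] encodes the relative
# offset (j - i) with interleaved sin/cos channels.
def _example_usage():
    x = torch.rand([4, 4, 4])
    rel = SinusoidRelativePositionalEmbedding()(x)
    assert rel.shape == (4, 4, 4)
    return rel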
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_floor_divide_lift_fresh_mul_pow_0(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x2 = xindex // 16
x0 = xindex % 4
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = 1.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tl.full([1], 3, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = 2.0
tmp11 = 3.0
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp7, tmp12)
tmp14 = x2
tmp15 = tmp14 < tmp1
tmp16 = tmp14 < tmp3
tmp17 = tl.where(tmp16, tmp5, tmp6)
tmp18 = tmp14 < tmp8
tmp19 = tl.where(tmp18, tmp10, tmp11)
tmp20 = tl.where(tmp15, tmp17, tmp19)
tmp21 = tmp13 - tmp20
tmp22 = x0
tmp23 = tmp22 < tmp1
tmp24 = tmp22 < tmp3
tmp25 = tl.where(tmp24, tmp5, tmp6)
tmp26 = tmp22 < tmp8
tmp27 = tl.where(tmp26, tmp10, tmp11)
tmp28 = tl.where(tmp23, tmp25, tmp27)
tmp29 = 0.5
tmp30 = tmp28 * tmp29
tmp31 = libdevice.floor(tmp30)
tmp32 = tmp31 * tmp10
tmp33 = 0.25
tmp34 = tmp32 * tmp33
tmp35 = 10000.0
tmp36 = libdevice.pow(tmp35, tmp34)
tmp37 = tmp21 / tmp36
tl.store(out_ptr0 + x3, tmp37, xmask)
@triton.jit
def triton_poi_fused_copy_cos_sin_1(in_out_ptr0, in_ptr0, xnumel,
                                    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x3 = xindex // 4
x1 = xindex // 4 % 4
x2 = xindex // 16
tmp0 = x4 % 2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 == tmp1
tmp3 = tl.load(in_ptr0 + (2 * (x0 // 2) + 4 * x3), tmp2 & xmask,
eviction_policy='evict_last', other=0.0)
tmp4 = tl_math.sin(tmp3)
tmp5 = tl.full(tmp4.shape, 0.0, tmp4.dtype)
tmp6 = tl.where(tmp2, tmp4, tmp5)
tmp7 = x1
tmp8 = tl.full([1], 2, tl.int64)
tmp9 = tmp7 < tmp8
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp7 < tmp10
tmp12 = 0.0
tmp13 = 1.0
tmp14 = tl.where(tmp11, tmp12, tmp13)
tmp15 = tl.full([1], 3, tl.int64)
tmp16 = tmp7 < tmp15
tmp17 = 2.0
tmp18 = 3.0
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp9, tmp14, tmp19)
tmp21 = x2
tmp22 = tmp21 < tmp8
tmp23 = tmp21 < tmp10
tmp24 = tl.where(tmp23, tmp12, tmp13)
tmp25 = tmp21 < tmp15
tmp26 = tl.where(tmp25, tmp17, tmp18)
tmp27 = tl.where(tmp22, tmp24, tmp26)
tmp28 = tmp20 - tmp27
tmp29 = x0
tmp30 = tmp29 < tmp8
tmp31 = tmp29 < tmp10
tmp32 = tl.where(tmp31, tmp12, tmp13)
tmp33 = tmp29 < tmp15
tmp34 = tl.where(tmp33, tmp17, tmp18)
tmp35 = tl.where(tmp30, tmp32, tmp34)
tmp36 = 0.5
tmp37 = tmp35 * tmp36
tmp38 = libdevice.floor(tmp37)
tmp39 = tmp38 * tmp17
tmp40 = 0.25
tmp41 = tmp39 * tmp40
tmp42 = 10000.0
tmp43 = libdevice.pow(tmp42, tmp41)
tmp44 = tmp28 / tmp43
tmp45 = tl.where(tmp2, tmp6, tmp44)
tmp46 = tmp29 >= tmp10
tmp47 = (-1 + x0) % 2
tmp48 = tmp47 == tmp1
tmp49 = tmp46 & tmp48
    tmp50 = tl.load(
        in_ptr0 + (1 + 2 * triton_helpers.div_floor_integer(-1 + x0, 2) + 4 * x3),
        tmp49 & xmask, eviction_policy='evict_last', other=0.0)
tmp51 = tl_math.cos(tmp50)
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp49, tmp51, tmp52)
tmp54 = tl.where(tmp49, tmp53, tmp45)
tl.store(in_out_ptr0 + x4, tmp54, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_floor_divide_lift_fresh_mul_pow_0[grid(64)](buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf2 = buf1
del buf1
triton_poi_fused_copy_cos_sin_1[grid(64)](buf2, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf0
return buf2,
class SinusoidRelativePositionalEmbeddingNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
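# Usage sketch (added): shape-specialized compiled variant; expects a
# (4, 4, 4) CUDA tensor, matching the original benchmark input.
def _example_compiled_usage():
    x = torch.rand([4, 4, 4], device='cuda')
    return SinusoidRelativePositionalEmbeddingNew()(x)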
| yzhangcs/parser | SinusoidRelativePositionalEmbedding | false | 16,791 | ["MIT"] | 439 | 3abebde1c9fe0bf2e99adce845aaf2a04b194f8a | https://github.com/yzhangcs/parser/tree/3abebde1c9fe0bf2e99adce845aaf2a04b194f8a |
SoftCrossEntropy | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ud/cudxyg6cvuxodvjk7bq5jdxqkaf2laa4wqrpyr3wgm5tpdonptka.py
# Topologically Sorted Source Nodes: [log_softmax, log_likelihood, mul, sum_1, loss], Original ATen: [aten._log_softmax, aten.neg, aten.mul, aten.sum, aten.div]
# Source node to ATen node mapping:
# log_likelihood => neg
# log_softmax => exp, log, sub_1, sum_1
# loss => div
# mul => mul
# sum_1 => sum_2
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sub_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %arg1_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 4), kwargs = {})
triton_per_fused__log_softmax_div_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_mul_neg_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r3), None)
tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (r0), None, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp14 = -tmp13
tmp16 = tmp14 * tmp15
tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp20 = 0.25
tmp21 = tmp19 * tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [log_softmax, log_likelihood, mul, sum_1, loss], Original ATen: [aten._log_softmax, aten.neg, aten.mul, aten.sum, aten.div]
triton_per_fused__log_softmax_div_mul_neg_sum_1.run(buf2, buf0, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg1_1
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class SoftCrossEntropy(nn.Module):
def __init__(self):
super().__init__()
def forward(self, inputs, target):
log_likelihood = -F.log_softmax(inputs, dim=1)
sample_num, _class_num = target.shape
loss = torch.sum(torch.mul(log_likelihood, target)) / sample_num
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
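# Usage sketch (added; not part of the original source). With a soft target
# distribution the loss is
#   L = sum(-log_softmax(inputs, dim=1) * target) / sample_num,
# i.e. cross entropy against full label distributions rather than class indices.
def _example_usage():
    criterion = SoftCrossEntropy()
    logits = torch.randn(4, 4)
    target = torch.softmax(torch.randn(4, 4), dim=1)   # rows sum to 1
    return criterion(logits, target)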
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel,
                                    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
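    # NOTE (added): the bare expressions below are retained from the generated
    # kernel, where they were bound to xindex/xmask/rmask; their results are
    # unused here.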
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
    tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp14 = -tmp13
tmp16 = tmp14 * tmp15
tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp20 = 0.25
tmp21 = tmp19 * tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf2, buf0,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf2,
class SoftCrossEntropyNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
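# Usage sketch (added): the fused kernels assume the benchmark shapes, i.e.
# (4, 4, 4, 4) logits broadcast against a (4, 4) target, both on CUDA.
def _example_compiled_usage():
    logits = torch.rand([4, 4, 4, 4], device='cuda')
    target = torch.rand([4, 4], device='cuda')
    return SoftCrossEntropyNew()(logits, target)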
| zake7749/WSDM-Cup-2019 | SoftCrossEntropy | false | 16,792 | ["Apache-2.0"] | 64 | 5e9c9ae4197a5dedf6dbccc712bb2bbaae99edee | https://github.com/zake7749/WSDM-Cup-2019/tree/5e9c9ae4197a5dedf6dbccc712bb2bbaae99edee |
Quantization | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/od/codpy52rvc5askcobcgimowe6roz5jcvkndhmvmghsacnld6obn2.py
# Topologically Sorted Source Nodes: [input_1, mul, round_1, output], Original ATen: [aten.clamp, aten.mul, aten.round, aten.div]
# Source node to ATen node mapping:
# input_1 => clamp_max, clamp_min
# mul => mul
# output => div
# round_1 => round_1
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max, 255.0), kwargs = {})
# %round_1 : [num_users=1] = call_function[target=torch.ops.aten.round.default](args = (%mul,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%round_1, 255.0), kwargs = {})
triton_poi_fused_clamp_div_mul_round_0 = async_compile.triton('triton_poi_fused_clamp_div_mul_round_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_mul_round_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_mul_round_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 1.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = 255.0
tmp6 = tmp4 * tmp5
tmp7 = libdevice.nearbyint(tmp6)
tmp8 = 0.00392156862745098
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_1, mul, round_1, output], Original ATen: [aten.clamp, aten.mul, aten.round, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_div_mul_round_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Quant(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
input = torch.clamp(input, 0, 1)
output = (input * 255.0).round() / 255.0
return output
@staticmethod
def backward(ctx, grad_output):
return grad_output
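# Note: Quant.backward returns grad_output unchanged, i.e. a straight-through
# estimator: the non-differentiable round() is treated as the identity for
# gradients so the module stays trainable. A minimal sanity check (assumed
# usage, not part of the original source):
#
#   x = torch.rand(2, 3, requires_grad=True)
#   Quant.apply(x).sum().backward()
#   assert torch.equal(x.grad, torch.ones_like(x))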
class Quantization(nn.Module):
def __init__(self):
super(Quantization, self).__init__()
def forward(self, input):
return Quant.apply(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_div_mul_round_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 1.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = 255.0
tmp6 = tmp4 * tmp5
tmp7 = libdevice.nearbyint(tmp6)
tmp8 = 0.00392156862745098
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_mul_round_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class Quant(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
input = torch.clamp(input, 0, 1)
output = (input * 255.0).round() / 255.0
return output
@staticmethod
def backward(ctx, grad_output):
return grad_output
class QuantizationNew(nn.Module):
def __init__(self):
super(QuantizationNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| yzxing87/Invertible-ISP | Quantization | false | 16,793 | [
"MIT"
] | 246 | 344dd333dd2a075f6a9e4ffc445dc387ca3014c4 | https://github.com/yzxing87/Invertible-ISP/tree/344dd333dd2a075f6a9e4ffc445dc387ca3014c4 |
SoftMarginTriplet | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/6u/c6uhndgoedypunrbeii3admbblv5u53kul4axszjvsnuuo2xn54m.py
# Topologically Sorted Source Nodes: [sub, mul, add, loss, loss_1], Original ATen: [aten.sub, aten.mul, aten.add, aten.relu, aten.mean]
# Source node to ATen node mapping:
# add => add
# loss => relu
# loss_1 => mean
# mul => mul
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, 0.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %mul), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%relu,), kwargs = {})
triton_per_fused_add_mean_mul_relu_sub_0 = async_compile.triton('triton_per_fused_add_mean_mul_relu_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_relu_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mul_relu_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp3 = tl.load(in_ptr2 + (r0), None)
tmp2 = tmp0 - tmp1
tmp4 = 0.0
tmp5 = tmp3 * tmp4
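    # The 0.0 factor here is the module's default margin, so the softmargin
    # input (in_ptr2) is multiplied away and the kernel reduces to
    # mean(relu(dist_ap - dist_an)).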
tmp6 = tmp2 + tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 256.0
tmp13 = tmp11 / tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp13, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, mul, add, loss, loss_1], Original ATen: [aten.sub, aten.mul, aten.add, aten.relu, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_mul_relu_sub_0.run(buf1, arg0_1, arg1_1, arg2_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
class SoftMarginTriplet(_Loss):
__constants__ = ['reduction']
"""
    Takes two 1D mini-batch `Tensor`s `x1` and `x2`,
    and a 1D mini-batch label tensor `y` with values `1` or `-1`.
    If `y == 1`, the first input is assumed to be ranked higher
    (have a larger value) than the second input, and vice versa for `y == -1`.
The loss function for each sample in the mini-batch is:
loss(x, y) = max(0, -y * (x1 - x2) + margin)
reduction='elementwise_mean'|'none'|'sum'
"""
def __init__(self, margin=0.0, size_average=None, reduce=None,
reduction='elementwise_mean'):
super(SoftMarginTriplet, self).__init__(size_average, reduce, reduction
)
self.margin = margin
def forward(self, dist_ap, dist_an, softmargin):
loss = F.relu(dist_ap - dist_an + softmargin * self.margin)
if self.reduction == 'elementwise_mean':
loss = loss.mean()
else:
loss = loss.sum()
return loss
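# A minimal usage sketch (shapes and values are assumptions for illustration):
#
#   criterion = SoftMarginTriplet()              # margin defaults to 0.0
#   dist_ap = torch.rand(8, requires_grad=True)  # anchor-positive distances
#   dist_an = torch.rand(8)                      # anchor-negative distances
#   loss = criterion(dist_ap, dist_an, torch.ones(8))
#   loss.backward()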
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_relu_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp3 = tl.load(in_ptr2 + r0, None)
tmp2 = tmp0 - tmp1
tmp4 = 0.0
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 256.0
tmp13 = tmp11 / tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp13, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_mul_relu_sub_0[grid(1)](buf1, arg0_1,
arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf1,
class SoftMarginTripletNew(_Loss):
__constants__ = ['reduction']
"""
    Takes two 1D mini-batch `Tensor`s `x1` and `x2`,
    and a 1D mini-batch label tensor `y` with values `1` or `-1`.
    If `y == 1`, the first input is assumed to be ranked higher
    (have a larger value) than the second input, and vice versa for `y == -1`.
The loss function for each sample in the mini-batch is:
loss(x, y) = max(0, -y * (x1 - x2) + margin)
reduction='elementwise_mean'|'none'|'sum'
"""
def __init__(self, margin=0.0, size_average=None, reduce=None,
reduction='elementwise_mean'):
super(SoftMarginTripletNew, self).__init__(size_average, reduce,
reduction)
self.margin = margin
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| zhangxinyu-tj/PAST | SoftMarginTriplet | false | 16,794 | [
"MIT"
] | 112 | 67f1f7a780e869aa7867167538edb03faa96dec5 | https://github.com/zhangxinyu-tj/PAST/tree/67f1f7a780e869aa7867167538edb03faa96dec5 |
BCELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/tq/ctqe4t2u4pnmkntuiboiacivzgxvbrz6j3k3iuziygulkdq2phdb.py
# Topologically Sorted Source Nodes: [loss, pred_sigmoid], Original ATen: [aten.binary_cross_entropy_with_logits, aten.sigmoid]
# Source node to ATen node mapping:
# loss => abs_1, exp, full_default, log1p, minimum, mul, neg, sub, sub_1, sub_2
# pred_sigmoid => sigmoid
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sigmoid), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %sigmoid), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sigmoid,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
triton_poi_fused_binary_cross_entropy_with_logits_sigmoid_0 = async_compile.triton('triton_poi_fused_binary_cross_entropy_with_logits_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_binary_cross_entropy_with_logits_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_binary_cross_entropy_with_logits_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp2 * tmp4
tmp6 = 0.0
tmp7 = triton_helpers.minimum(tmp6, tmp4)
tmp8 = tl_math.abs(tmp4)
tmp9 = -tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = libdevice.log1p(tmp10)
tmp12 = tmp7 - tmp11
tmp13 = tmp5 - tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [loss, pred_sigmoid], Original ATen: [aten.binary_cross_entropy_with_logits, aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_binary_cross_entropy_with_logits_sigmoid_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (reinterpret_tensor(buf0, (256, ), (1, ), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
def bce_loss(pred, target, use_sigmoid=True):
"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
Qualified and Distributed Bounding Boxes for Dense Object Detection
<https://arxiv.org/abs/2006.04388>`_.
Args:
pred (torch.Tensor): Predicted joint representation of classification
and quality (IoU) estimation with shape (N, C), C is the number of
classes.
target (tuple([torch.Tensor])): Target category label with shape (N,)
and target quality label with shape (N,).
beta (float): The beta parameter for calculating the modulating factor.
Defaults to 2.0.
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
if use_sigmoid:
func = F.binary_cross_entropy_with_logits
else:
func = F.binary_cross_entropy
pred_sigmoid = pred.sigmoid() if use_sigmoid else pred
loss = func(pred_sigmoid, target, reduction='none')
return loss.flatten()
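# Note: with use_sigmoid=True the function applies a sigmoid to `pred` and
# then calls binary_cross_entropy_with_logits, which applies another sigmoid
# internally; the fused Triton kernel above reproduces exactly this behaviour
# by computing the with-logits loss on sigmoid(pred).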
class BCELoss(nn.Module):
"""
Args:
        use_sigmoid (bool): Whether a sigmoid is applied to the predictions
            before computing the loss. Defaults to True.
"""
def __init__(self, use_sigmoid=True):
super(BCELoss, self).__init__()
self.use_sigmoid = use_sigmoid
def forward(self, pred, target):
"""Forward function.
Args:
pred (torch.Tensor): Predicted joint representation of
classification and quality (IoU) estimation with shape (N, C),
C is the number of classes.
target (tuple([torch.Tensor])): Target category label with shape
(N,) and target quality label with shape (N,).
"""
return bce_loss(pred, target, use_sigmoid=self.use_sigmoid)
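# A minimal usage sketch (shapes are assumptions for illustration):
#
#   criterion = BCELoss(use_sigmoid=True)
#   pred = torch.randn(8, 4, requires_grad=True)
#   loss = criterion(pred, torch.rand(8, 4))  # element-wise, flattened: (32,)
#   loss.mean().backward()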
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_binary_cross_entropy_with_logits_sigmoid_0(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp2 * tmp4
tmp6 = 0.0
tmp7 = triton_helpers.minimum(tmp6, tmp4)
tmp8 = tl_math.abs(tmp4)
tmp9 = -tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = libdevice.log1p(tmp10)
tmp12 = tmp7 - tmp11
tmp13 = tmp5 - tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_binary_cross_entropy_with_logits_sigmoid_0[grid(256)](
arg1_1, arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return reinterpret_tensor(buf0, (256,), (1,), 0),
def bce_loss(pred, target, use_sigmoid=True):
"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
Qualified and Distributed Bounding Boxes for Dense Object Detection
<https://arxiv.org/abs/2006.04388>`_.
Args:
pred (torch.Tensor): Predicted joint representation of classification
and quality (IoU) estimation with shape (N, C), C is the number of
classes.
target (tuple([torch.Tensor])): Target category label with shape (N,)
and target quality label with shape (N,).
beta (float): The beta parameter for calculating the modulating factor.
Defaults to 2.0.
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
if use_sigmoid:
func = F.binary_cross_entropy_with_logits
else:
func = F.binary_cross_entropy
pred_sigmoid = pred.sigmoid() if use_sigmoid else pred
loss = func(pred_sigmoid, target, reduction='none')
return loss.flatten()
class BCELossNew(nn.Module):
"""
Args:
        use_sigmoid (bool): Whether a sigmoid is applied to the predictions
            before computing the loss. Defaults to True.
"""
def __init__(self, use_sigmoid=True):
super(BCELossNew, self).__init__()
self.use_sigmoid = use_sigmoid
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| zhangzhengde0225/SwinTrack | BCELoss | false | 16,795 | [
"MIT"
] | 143 | 526be17f8ef266cb924c6939bd8dda23e9b73249 | https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249 |
DotAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/bl/cblmokvcpsr2ttllzsqpn7e5if5ssmadzarqlyj626zemyxwynho.py
# Topologically Sorted Source Nodes: [repeat], Original ATen: [aten.repeat]
# Source node to ATen node mapping:
# repeat => repeat
# Graph fragment:
# %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%permute, [4, 1, 1]), kwargs = {})
triton_poi_fused_repeat_0 = async_compile.triton('triton_poi_fused_repeat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ia/ciaywmr3mhpqbva3csqbsfhnix2munbtjinp3bnhke3lbyahwgx6.py
# Topologically Sorted Source Nodes: [relu, attn_energies], Original ATen: [aten.relu, aten._softmax]
# Source node to ATen node mapping:
# attn_energies => amax, exp, sub
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%relu, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_relu_1 = async_compile.triton('triton_poi_fused__softmax_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp6 = triton_helpers.maximum(tmp1, tmp5)
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = triton_helpers.maximum(tmp1, tmp8)
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = triton_helpers.maximum(tmp1, tmp11)
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py
# Topologically Sorted Source Nodes: [attn_energies], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_energies => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/va/cvaifdneghk3fq3ckdpa5w45zknznh7m3lb2dzfjyar66yxqs77s.py
# Topologically Sorted Source Nodes: [weighted, sum_2, representations], Original ATen: [aten.mul, aten.sum, aten.squeeze]
# Source node to ATen node mapping:
# representations => squeeze_1
# sum_2 => sum_3
# weighted => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %expand_1), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %squeeze_1 : [num_users=1] = call_function[target=torch.ops.aten.squeeze.default](args = (%sum_3,), kwargs = {})
triton_poi_fused_mul_squeeze_sum_3 = async_compile.triton('triton_poi_fused_mul_squeeze_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_squeeze_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_squeeze_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp8 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp12 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x2), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [repeat], Original ATen: [aten.repeat]
stream0 = get_raw_stream(0)
triton_poi_fused_repeat_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [repeat, weights], Original ATen: [aten.repeat, aten.bmm]
extern_kernels.bmm(primals_1, buf0, out=buf1)
buf2 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [relu, attn_energies], Original ATen: [aten.relu, aten._softmax]
triton_poi_fused__softmax_relu_1.run(buf1, buf2, 16, grid=grid(16), stream=stream0)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_energies], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf2, buf3, 16, grid=grid(16), stream=stream0)
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten.div]
triton_poi_fused__softmax_2.run(buf3, buf4, 16, grid=grid(16), stream=stream0)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [weighted, sum_2, representations], Original ATen: [aten.mul, aten.sum, aten.squeeze]
triton_poi_fused_mul_squeeze_sum_3.run(primals_1, buf4, buf5, 16, grid=grid(16), stream=stream0)
return (buf5, buf4, primals_1, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
class DotAttention(nn.Module):
def __init__(self, hidden_size):
super(DotAttention, self).__init__()
self.hidden_size = hidden_size
self.attn_vector = nn.Parameter(torch.Tensor(1, hidden_size),
requires_grad=True)
        init.xavier_uniform_(self.attn_vector.data)
def get_mask(self):
pass
def forward(self, inputs, lengths=None):
batch_size, _max_len = inputs.size()[:2]
"""
print("INPUTS", inputs.size())
print("ATTN", self.attn_vector # (1, hidden_size)
.unsqueeze(0) # (1, hidden_size, 1)
.transpose(2, 1)
.repeat(batch_size, 1, 1).size())"""
weights = torch.bmm(inputs, self.attn_vector.unsqueeze(0).transpose
(2, 1).repeat(batch_size, 1, 1))
        attn_energies = F.softmax(F.relu(weights.squeeze()), dim=-1)
_sums = attn_energies.sum(-1).unsqueeze(1).expand_as(attn_energies)
attn_weights = attn_energies / _sums
weighted = torch.mul(inputs, attn_weights.unsqueeze(-1).expand_as(
inputs))
representations = weighted.sum(1).squeeze()
return representations, attn_weights
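# Note: attn_energies already sums to 1 along the softmax dim, so the extra
# division by `_sums` is numerically a no-op; the compiled graph mirrors this
# by running the same normalization kernel twice (buf3 -> buf4).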
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp6 = triton_helpers.maximum(tmp1, tmp5)
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = triton_helpers.maximum(tmp1, tmp8)
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = triton_helpers.maximum(tmp1, tmp11)
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_squeeze_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_repeat_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(primals_1, buf0, out=buf1)
buf2 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
del buf0
triton_poi_fused__softmax_relu_1[grid(16)](buf1, buf2, 16, XBLOCK=
16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = buf3
del buf3
triton_poi_fused_mul_squeeze_sum_3[grid(16)](primals_1, buf4, buf5,
16, XBLOCK=16, num_warps=1, num_stages=1)
return buf5, buf4, primals_1, buf1
class DotAttentionNew(nn.Module):
def __init__(self, hidden_size):
super(DotAttentionNew, self).__init__()
self.hidden_size = hidden_size
self.attn_vector = nn.Parameter(torch.Tensor(1, hidden_size),
requires_grad=True)
        init.xavier_uniform_(self.attn_vector.data)
def get_mask(self):
pass
def forward(self, input_0):
primals_2 = self.attn_vector
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0], output[1]
| zake7749/DeepToxic | DotAttention | false | 16,796 | [
"MIT"
] | 206 | 92710446c55fe60526099f808a7e1179402e199f | https://github.com/zake7749/DeepToxic/tree/92710446c55fe60526099f808a7e1179402e199f |
IoULoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/lv/clv77b2s32ncgr2yjerx3jwvnwzihlxi5w6vnhnfijuy35b5n2er.py
# Topologically Sorted Source Nodes: [sub, sub_1, area1, sub_2, sub_3, area2, add, overlap, union, eps, union_1, ious, ious_1, log, loss], Original ATen: [aten.sub, aten.mul, aten.add, aten.lift_fresh, aten.maximum, aten.div, aten.clamp, aten.log, aten.neg]
# Source node to ATen node mapping:
# add => add
# area1 => mul
# area2 => mul_1
# eps => full_default
# ious => div
# ious_1 => clamp_min_1
# log => log
# loss => neg
# overlap => mul_2
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# sub_3 => sub_3
# union => sub_5
# union_1 => maximum_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select, %select_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_2, %select_3), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sub_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_4, %select_5), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_6, %select_7), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %sub_3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_8, %select_9), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mul_2), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 9.999999974752427e-07), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%sub_5, %full_default), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_2, %maximum_1), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%div, 1e-06), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_min_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log,), kwargs = {})
triton_poi_fused_add_clamp_div_lift_fresh_log_maximum_mul_neg_sub_0 = async_compile.triton('triton_poi_fused_add_clamp_div_lift_fresh_log_maximum_mul_neg_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_lift_fresh_log_maximum_mul_neg_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_lift_fresh_log_maximum_mul_neg_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
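    # Each x0 indexes one aligned box pair stored as (x1, y1, x2, y2) with
    # stride 4; in_ptr0 holds the predicted boxes and in_ptr1 the targets.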
tmp0 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.minimum(tmp0, tmp1)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp2 - tmp5
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp11 - tmp14
tmp16 = triton_helpers.maximum(tmp15, tmp7)
tmp17 = tmp8 * tmp16
tmp18 = tmp0 - tmp3
tmp19 = tmp9 - tmp12
tmp20 = tmp18 * tmp19
tmp21 = tmp1 - tmp4
tmp22 = tmp10 - tmp13
tmp23 = tmp21 * tmp22
tmp24 = tmp20 + tmp23
tmp25 = tmp24 - tmp17
tmp26 = 9.999999974752427e-07
tmp27 = triton_helpers.maximum(tmp25, tmp26)
tmp28 = tmp17 / tmp27
tmp29 = 1e-06
tmp30 = triton_helpers.maximum(tmp28, tmp29)
tmp31 = tl_math.log(tmp30)
tmp32 = -tmp31
tl.store(in_out_ptr0 + (x0), tmp32, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sub, sub_1, area1, sub_2, sub_3, area2, add, overlap, union, eps, union_1, ious, ious_1, log, loss], Original ATen: [aten.sub, aten.mul, aten.add, aten.lift_fresh, aten.maximum, aten.div, aten.clamp, aten.log, aten.neg]
stream0 = get_raw_stream(0)
triton_poi_fused_add_clamp_div_lift_fresh_log_maximum_mul_neg_sub_0.run(buf2, arg0_1, arg1_1, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
def fp16_clamp(x, min=None, max=None):
if not x.is_cuda and x.dtype == torch.float16:
return x.float().clamp(min, max).half()
return x.clamp(min, max)
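# Note on fp16_clamp: in the torch versions this code targets, clamp is not
# implemented for float16 tensors on CPU, so the helper round-trips through
# float32 there; on CUDA, clamp supports fp16 directly.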
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-06):
"""Calculate overlap between two set of bboxes.
FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889
Note:
Assume bboxes1 is M x 4, bboxes2 is N x 4, when mode is 'iou',
there are some new generated variable when calculating IOU
using bbox_overlaps function:
1) is_aligned is False
area1: M x 1
area2: N x 1
lt: M x N x 2
rb: M x N x 2
wh: M x N x 2
overlap: M x N x 1
union: M x N x 1
ious: M x N x 1
Total memory:
S = (9 x N x M + N + M) * 4 Byte,
When using FP16, we can reduce:
R = (9 x N x M + N + M) * 4 / 2 Byte
                R larger than (N + M) * 4 * 2 always holds when N and M >= 1.
Obviously, N + M <= N * M < 3 * N * M, when N >=2 and M >=2,
N + 1 < 3 * N, when N or M is 1.
Given M = 40 (ground truth), N = 400000 (three anchor boxes
in per grid, FPN, R-CNNs),
R = 275 MB (one times)
A special case (dense detection), M = 512 (ground truth),
R = 3516 MB = 3.43 GB
When the batch size is B, reduce:
B x R
Therefore, CUDA memory runs out frequently.
Experiments on GeForce RTX 2080Ti (11019 MiB):
| dtype | M | N | Use | Real | Ideal |
|:----:|:----:|:----:|:----:|:----:|:----:|
| FP32 | 512 | 400000 | 8020 MiB | -- | -- |
| FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB |
| FP32 | 40 | 400000 | 1540 MiB | -- | -- |
| FP16 | 40 | 400000 | 1264 MiB | 276MiB | 275 MiB |
2) is_aligned is True
area1: N x 1
area2: N x 1
lt: N x 2
rb: N x 2
wh: N x 2
overlap: N x 1
union: N x 1
ious: N x 1
Total memory:
S = 11 x N * 4 Byte
When using FP16, we can reduce:
R = 11 x N * 4 / 2 Byte
            The same holds for 'giou' (which uses more memory than 'iou').
            Time-wise, FP16 is generally faster than FP32.
            When gpu_assign_thr is not -1, it takes more time on CPU
            but does not reduce memory.
            Therefore, we can reduce the memory by half and keep the speed.
    If ``is_aligned`` is ``False``, then calculate the overlaps between each
    bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
    pair of bboxes1 and bboxes2.
Args:
bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
B indicates the batch dim, in shape (B1, B2, ..., Bn).
            If ``is_aligned`` is ``True``, then m and n must be equal.
mode (str): "iou" (intersection over union), "iof" (intersection over
foreground) or "giou" (generalized intersection over union).
Default "iou".
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
eps (float, optional): A value added to the denominator for numerical
stability. Default 1e-6.
Returns:
        Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
Example:
>>> bboxes1 = torch.FloatTensor([
>>> [0, 0, 10, 10],
>>> [10, 10, 20, 20],
>>> [32, 32, 38, 42],
>>> ])
>>> bboxes2 = torch.FloatTensor([
>>> [0, 0, 10, 20],
>>> [0, 10, 10, 19],
>>> [10, 10, 20, 20],
>>> ])
>>> overlaps = bbox_overlaps(bboxes1, bboxes2)
>>> assert overlaps.shape == (3, 3)
>>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
>>> assert overlaps.shape == (3, )
Example:
>>> empty = torch.empty(0, 4)
>>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
>>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
>>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
>>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
"""
assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
assert bboxes1.size(-1) == 4 or bboxes1.size(0) == 0
assert bboxes2.size(-1) == 4 or bboxes2.size(0) == 0
assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
batch_shape = bboxes1.shape[:-2]
rows = bboxes1.size(-2)
cols = bboxes2.size(-2)
if is_aligned:
assert rows == cols
if rows * cols == 0:
if is_aligned:
return bboxes1.new(batch_shape + (rows,))
else:
return bboxes1.new(batch_shape + (rows, cols))
area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] -
bboxes1[..., 1])
area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] -
bboxes2[..., 1])
if is_aligned:
lt = torch.max(bboxes1[..., :2], bboxes2[..., :2])
rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:])
wh = fp16_clamp(rb - lt, min=0)
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1 + area2 - overlap
else:
union = area1
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
else:
lt = torch.max(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2])
rb = torch.min(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:])
wh = fp16_clamp(rb - lt, min=0)
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1[..., None] + area2[..., None, :] - overlap
else:
union = area1[..., None]
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :, None, :2], bboxes2[...,
None, :, :2])
enclosed_rb = torch.max(bboxes1[..., :, None, 2:], bboxes2[...,
None, :, 2:])
eps = union.new_tensor([eps])
union = torch.max(union, eps)
ious = overlap / union
if mode in ['iou', 'iof']:
return ious
enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0)
enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
enclose_area = torch.max(enclose_area, eps)
gious = ious - (enclose_area - union) / enclose_area
return gious
def iou_loss(pred, target, linear=False, eps=1e-06):
"""IoU loss.
Computing the IoU loss between a set of predicted bboxes and target bboxes.
The loss is calculated as negative log of IoU.
Args:
pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
linear (bool, optional): If True, use linear scale of loss instead of
log scale. Default: False.
eps (float): Eps to avoid log(0).
Return:
torch.Tensor: Loss tensor.
"""
ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)
if linear:
loss = 1 - ious
else:
loss = -ious.log()
return loss
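# A hedged usage sketch (hypothetical boxes; CPU is fine): with aligned
# (n, 4) boxes in <x1, y1, x2, y2> format, the IoU here is 100 / 200 = 0.5.
# >>> pred = torch.FloatTensor([[0.0, 0.0, 10.0, 10.0]])
# >>> target = torch.FloatTensor([[0.0, 0.0, 10.0, 20.0]])
# >>> iou_loss(pred, target)               # -log(0.5) ~= 0.6931
# >>> iou_loss(pred, target, linear=True)  # 1 - 0.5 = 0.5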
class IoULoss(nn.Module):
"""IoULoss.
Computing the IoU loss between a set of predicted bboxes and target bboxes.
Args:
linear (bool): If True, use linear scale of loss instead of log scale.
Default: False.
eps (float): Eps to avoid log(0).
"""
def __init__(self, linear=False, eps=1e-06):
super(IoULoss, self).__init__()
self.linear = linear
self.eps = eps
def forward(self, pred, target):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
"""
loss = iou_loss(pred, target, self.linear, self.eps)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_clamp_div_lift_fresh_log_maximum_mul_neg_sub_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = triton_helpers.minimum(tmp0, tmp1)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp2 - tmp5
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp11 - tmp14
tmp16 = triton_helpers.maximum(tmp15, tmp7)
tmp17 = tmp8 * tmp16
tmp18 = tmp0 - tmp3
tmp19 = tmp9 - tmp12
tmp20 = tmp18 * tmp19
tmp21 = tmp1 - tmp4
tmp22 = tmp10 - tmp13
tmp23 = tmp21 * tmp22
tmp24 = tmp20 + tmp23
tmp25 = tmp24 - tmp17
tmp26 = 9.999999974752427e-07
tmp27 = triton_helpers.maximum(tmp25, tmp26)
tmp28 = tmp17 / tmp27
tmp29 = 1e-06
tmp30 = triton_helpers.maximum(tmp28, tmp29)
tmp31 = tl_math.log(tmp30)
tmp32 = -tmp31
tl.store(in_out_ptr0 + x0, tmp32, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_clamp_div_lift_fresh_log_maximum_mul_neg_sub_0[
grid(64)](buf2, arg0_1, arg1_1, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del arg0_1
del arg1_1
return buf2,
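# A hedged sanity check (assumes a CUDA device; tensors are hypothetical):
# the fused kernel should agree with the eager iou_loss defined below.
# >>> a = torch.rand(4, 4, 4, 4, device='cuda')
# >>> b = torch.rand(4, 4, 4, 4, device='cuda')
# >>> compiled, = call([a, b])
# >>> torch.allclose(compiled, iou_loss(a, b), atol=1e-5)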
def fp16_clamp(x, min=None, max=None):
if not x.is_cuda and x.dtype == torch.float16:
return x.float().clamp(min, max).half()
return x.clamp(min, max)
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-06):
"""Calculate overlap between two set of bboxes.
FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889
Note:
Assume bboxes1 is M x 4, bboxes2 is N x 4, when mode is 'iou',
there are some new generated variable when calculating IOU
using bbox_overlaps function:
1) is_aligned is False
area1: M x 1
area2: N x 1
lt: M x N x 2
rb: M x N x 2
wh: M x N x 2
overlap: M x N x 1
union: M x N x 1
ious: M x N x 1
            Total memory:
                S = (9 x N x M + N + M) * 4 Bytes
            When using FP16, we can reduce:
                R = (9 x N x M + N + M) * 4 / 2 Bytes
                R > (N + M) * 4 * 2 always holds when N and M >= 1:
                obviously N + M <= N * M < 3 * N * M when N >= 2 and M >= 2,
                and N + 1 < 3 * N when N or M is 1.
                Given M = 40 (ground truths) and N = 400000 (three anchor
                boxes per grid; FPN, R-CNNs),
                    R = 275 MB (per forward pass).
                In a special case (dense detection) with M = 512 ground
                truths,
                    R = 3516 MB = 3.43 GB.
                When the batch size is B, the saving becomes B x R, so in
                FP32 CUDA memory frequently runs out.
Experiments on GeForce RTX 2080Ti (11019 MiB):
| dtype | M | N | Use | Real | Ideal |
|:----:|:----:|:----:|:----:|:----:|:----:|
| FP32 | 512 | 400000 | 8020 MiB | -- | -- |
| FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB |
| FP32 | 40 | 400000 | 1540 MiB | -- | -- |
            | FP16 | 40 | 400000 | 1264 MiB | 276 MiB | 275 MiB |
2) is_aligned is True
area1: N x 1
area2: N x 1
lt: N x 2
rb: N x 2
wh: N x 2
overlap: N x 1
union: N x 1
ious: N x 1
            Total memory:
                S = 11 x N * 4 Bytes
            When using FP16, we can reduce:
                R = 11 x N * 4 / 2 Bytes
            The same holds for 'giou' (which uses more memory than 'iou').
            Time-wise, FP16 is generally faster than FP32.
            When gpu_assign_thr is not -1, assignment runs on the CPU, which
            takes more time but does not reduce memory.
            There, we can halve the memory while keeping the speed.
    If ``is_aligned`` is ``False``, calculate the overlaps between each bbox
    of bboxes1 and bboxes2; otherwise calculate the overlaps between each
    aligned pair of bboxes1 and bboxes2.
Args:
bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
B indicates the batch dim, in shape (B1, B2, ..., Bn).
            If ``is_aligned`` is ``True``, then m and n must be equal.
mode (str): "iou" (intersection over union), "iof" (intersection over
foreground) or "giou" (generalized intersection over union).
Default "iou".
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
eps (float, optional): A value added to the denominator for numerical
stability. Default 1e-6.
Returns:
        Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
Example:
>>> bboxes1 = torch.FloatTensor([
>>> [0, 0, 10, 10],
>>> [10, 10, 20, 20],
>>> [32, 32, 38, 42],
>>> ])
>>> bboxes2 = torch.FloatTensor([
>>> [0, 0, 10, 20],
>>> [0, 10, 10, 19],
>>> [10, 10, 20, 20],
>>> ])
>>> overlaps = bbox_overlaps(bboxes1, bboxes2)
>>> assert overlaps.shape == (3, 3)
>>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
>>> assert overlaps.shape == (3, )
Example:
>>> empty = torch.empty(0, 4)
>>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
>>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
>>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
>>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
"""
assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
assert bboxes1.size(-1) == 4 or bboxes1.size(0) == 0
assert bboxes2.size(-1) == 4 or bboxes2.size(0) == 0
assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
batch_shape = bboxes1.shape[:-2]
rows = bboxes1.size(-2)
cols = bboxes2.size(-2)
if is_aligned:
assert rows == cols
if rows * cols == 0:
if is_aligned:
return bboxes1.new(batch_shape + (rows,))
else:
return bboxes1.new(batch_shape + (rows, cols))
area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] -
bboxes1[..., 1])
area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] -
bboxes2[..., 1])
if is_aligned:
lt = torch.max(bboxes1[..., :2], bboxes2[..., :2])
rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:])
wh = fp16_clamp(rb - lt, min=0)
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1 + area2 - overlap
else:
union = area1
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
else:
lt = torch.max(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2])
rb = torch.min(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:])
wh = fp16_clamp(rb - lt, min=0)
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1[..., None] + area2[..., None, :] - overlap
else:
union = area1[..., None]
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :, None, :2], bboxes2[...,
None, :, :2])
enclosed_rb = torch.max(bboxes1[..., :, None, 2:], bboxes2[...,
None, :, 2:])
eps = union.new_tensor([eps])
union = torch.max(union, eps)
ious = overlap / union
if mode in ['iou', 'iof']:
return ious
enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0)
enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
enclose_area = torch.max(enclose_area, eps)
gious = ious - (enclose_area - union) / enclose_area
return gious
def iou_loss(pred, target, linear=False, eps=1e-06):
"""IoU loss.
Computing the IoU loss between a set of predicted bboxes and target bboxes.
The loss is calculated as negative log of IoU.
Args:
pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
linear (bool, optional): If True, use linear scale of loss instead of
log scale. Default: False.
eps (float): Eps to avoid log(0).
Return:
torch.Tensor: Loss tensor.
"""
ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)
if linear:
loss = 1 - ious
else:
loss = -ious.log()
return loss
class IoULossNew(nn.Module):
"""IoULoss.
Computing the IoU loss between a set of predicted bboxes and target bboxes.
Args:
linear (bool): If True, use linear scale of loss instead of log scale.
Default: False.
eps (float): Eps to avoid log(0).
"""
def __init__(self, linear=False, eps=1e-06):
super(IoULossNew, self).__init__()
self.linear = linear
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
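# A hedged usage sketch (assumes CUDA): IoULossNew is intended as a drop-in
# for IoULoss above, routing forward() through the Triton kernel. Note that
# the fused kernel bakes in linear=False and eps=1e-6, so the stored
# self.linear / self.eps attributes do not affect this compiled path.
# >>> m = IoULossNew()
# >>> loss = m(torch.rand(4, 4, 4, 4, device='cuda'),
# ...          torch.rand(4, 4, 4, 4, device='cuda'))  # shape (4, 4, 4)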
| zhangzhengde0225/SwinTrack | IoULoss | false | 16,797 | [
"MIT"
] | 143 | 526be17f8ef266cb924c6939bd8dda23e9b73249 | https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249 |
DIoULoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/jq/cjq5r6epnc7vmnbyflgtn3pkx3yrvnfkjg34iow656cvfqse6y7y.py
# Topologically Sorted Source Nodes: [sub_1, sub_2, ap, sub_3, sub_4, ag, add, overlap, sub_5, union, ious, add_4, add_5, sub_7, pow_3, left, add_6, add_7, sub_8, pow_4, right, rho2, pow_1, pow_2, add_2, c2, truediv_3, dious, loss], Original ATen: [aten.sub, aten.mul, aten.add, aten.div, aten.pow, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_2 => add_2
# add_4 => add_4
# add_5 => add_5
# add_6 => add_6
# add_7 => add_7
# ag => mul_2
# ap => mul_1
# c2 => add_3
# dious => sub_9
# ious => div
# left => div_1
# loss => sub_10
# overlap => mul
# pow_1 => pow_1
# pow_2 => pow_2
# pow_3 => pow_3
# pow_4 => pow_4
# rho2 => add_8
# right => div_2
# sub_1 => sub_1
# sub_2 => sub_2
# sub_3 => sub_3
# sub_4 => sub_4
# sub_5 => sub_5
# sub_7 => sub_7
# sub_8 => sub_8
# truediv_3 => div_3
# union => add_1
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_2, %select_3), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_4, %select_5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %sub_2), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_6, %select_7), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_8, %select_9), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %sub_4), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, %select_1), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mul), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_5, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add_1), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_16, %select_18), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_12, %select_14), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %add_5), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_7, 2), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_3, 4), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_17, %select_19), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_13, %select_15), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_6, %add_7), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_8, 2), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_4, 4), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_1, %div_2), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_10, 2), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_11, 2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 1e-06), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_8, %add_3), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %div_3), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sub_9), kwargs = {})
triton_poi_fused_add_div_mul_pow_rsub_sub_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_rsub_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_rsub_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_pow_rsub_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp4 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask)
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp10 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask)
tmp12 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp13 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask)
tmp2 = triton_helpers.minimum(tmp0, tmp1)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp2 - tmp5
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp11 - tmp14
tmp16 = triton_helpers.maximum(tmp15, tmp7)
tmp17 = tmp8 * tmp16
tmp18 = tmp0 - tmp3
tmp19 = tmp9 - tmp12
tmp20 = tmp18 * tmp19
tmp21 = tmp1 - tmp4
tmp22 = tmp10 - tmp13
tmp23 = tmp21 * tmp22
tmp24 = tmp20 + tmp23
tmp25 = tmp24 - tmp17
tmp26 = tmp4 + tmp1
tmp27 = tmp3 + tmp0
tmp28 = tmp26 - tmp27
tmp29 = tmp28 * tmp28
tmp30 = 0.25
tmp31 = tmp29 * tmp30
tmp32 = tmp13 + tmp10
tmp33 = tmp12 + tmp9
tmp34 = tmp32 - tmp33
tmp35 = tmp34 * tmp34
tmp36 = tmp35 * tmp30
tmp37 = tmp31 + tmp36
tmp38 = triton_helpers.maximum(tmp0, tmp1)
tmp39 = triton_helpers.minimum(tmp3, tmp4)
tmp40 = tmp38 - tmp39
tmp41 = triton_helpers.maximum(tmp40, tmp7)
tmp42 = tmp41 * tmp41
tmp43 = triton_helpers.maximum(tmp9, tmp10)
tmp44 = triton_helpers.minimum(tmp12, tmp13)
tmp45 = tmp43 - tmp44
tmp46 = triton_helpers.maximum(tmp45, tmp7)
tmp47 = tmp46 * tmp46
tmp48 = tmp42 + tmp47
tmp49 = 1e-06
tmp50 = tmp48 + tmp49
tmp51 = tmp37 / tmp50
tmp52 = tmp25 + tmp49
tmp53 = tmp17 / tmp52
tmp54 = tmp53 - tmp51
tmp55 = 1.0
tmp56 = tmp55 - tmp54
tl.store(in_out_ptr0 + (x2), tmp56, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sub_1, sub_2, ap, sub_3, sub_4, ag, add, overlap, sub_5, union, ious, add_4, add_5, sub_7, pow_3, left, add_6, add_7, sub_8, pow_4, right, rho2, pow_1, pow_2, add_2, c2, truediv_3, dious, loss], Original ATen: [aten.sub, aten.mul, aten.add, aten.div, aten.pow, aten.rsub]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_rsub_sub_0.run(buf3, arg0_1, arg1_1, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
def diou(pred, target, eps=1e-07):
lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]
ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
union = ap + ag - overlap + eps
ious = overlap / union
enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
cw = enclose_wh[:, 0]
ch = enclose_wh[:, 1]
c2 = cw ** 2 + ch ** 2 + eps
b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
b2_x1, b2_y1 = target[:, 0], target[:, 1]
b2_x2, b2_y2 = target[:, 2], target[:, 3]
left = (b2_x1 + b2_x2 - (b1_x1 + b1_x2)) ** 2 / 4
right = (b2_y1 + b2_y2 - (b1_y1 + b1_y2)) ** 2 / 4
rho2 = left + right
dious = ious - rho2 / c2
return dious
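# A hedged worked example (hypothetical boxes): for identical boxes the IoU
# term is ~1 and the center-distance penalty rho2 / c2 is 0, so diou ~= 1
# and diou_loss (defined below) is ~0.
# >>> b = torch.FloatTensor([[0.0, 0.0, 10.0, 10.0]])
# >>> diou(b, b)   # ~1.0 (slightly below 1 because eps pads the union)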
def diou_loss(pred, target, eps=1e-07):
"""`Implementation of Distance-IoU Loss: Faster and Better
Learning for Bounding Box Regression, https://arxiv.org/abs/1911.08287`_.
Code is modified from https://github.com/Zzh-tju/DIoU.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): A small value added for numerical stability.
Return:
Tensor: Loss tensor.
"""
dious = diou(pred, target, eps)
loss = 1 - dious
return loss
class DIoULoss(nn.Module):
def __init__(self, eps=1e-06):
super(DIoULoss, self).__init__()
self.eps = eps
def forward(self, pred, target):
return diou_loss(pred, target, self.eps)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_rsub_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp2 = triton_helpers.minimum(tmp0, tmp1)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp2 - tmp5
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp11 - tmp14
tmp16 = triton_helpers.maximum(tmp15, tmp7)
tmp17 = tmp8 * tmp16
tmp18 = tmp0 - tmp3
tmp19 = tmp9 - tmp12
tmp20 = tmp18 * tmp19
tmp21 = tmp1 - tmp4
tmp22 = tmp10 - tmp13
tmp23 = tmp21 * tmp22
tmp24 = tmp20 + tmp23
tmp25 = tmp24 - tmp17
tmp26 = tmp4 + tmp1
tmp27 = tmp3 + tmp0
tmp28 = tmp26 - tmp27
tmp29 = tmp28 * tmp28
tmp30 = 0.25
tmp31 = tmp29 * tmp30
tmp32 = tmp13 + tmp10
tmp33 = tmp12 + tmp9
tmp34 = tmp32 - tmp33
tmp35 = tmp34 * tmp34
tmp36 = tmp35 * tmp30
tmp37 = tmp31 + tmp36
tmp38 = triton_helpers.maximum(tmp0, tmp1)
tmp39 = triton_helpers.minimum(tmp3, tmp4)
tmp40 = tmp38 - tmp39
tmp41 = triton_helpers.maximum(tmp40, tmp7)
tmp42 = tmp41 * tmp41
tmp43 = triton_helpers.maximum(tmp9, tmp10)
tmp44 = triton_helpers.minimum(tmp12, tmp13)
tmp45 = tmp43 - tmp44
tmp46 = triton_helpers.maximum(tmp45, tmp7)
tmp47 = tmp46 * tmp46
tmp48 = tmp42 + tmp47
tmp49 = 1e-06
tmp50 = tmp48 + tmp49
tmp51 = tmp37 / tmp50
tmp52 = tmp25 + tmp49
tmp53 = tmp17 / tmp52
tmp54 = tmp53 - tmp51
tmp55 = 1.0
tmp56 = tmp55 - tmp54
tl.store(in_out_ptr0 + x2, tmp56, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf3 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_rsub_sub_0[grid(64)](buf3, arg0_1,
arg1_1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf3,
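# A hedged sanity check (assumes CUDA; random tensors are hypothetical):
# the fused kernel should match the eager diou_loss defined below. The
# kernel hardcodes eps = 1e-6, so compare with that value.
# >>> a = torch.rand(4, 4, 4, 4, device='cuda')
# >>> b = torch.rand(4, 4, 4, 4, device='cuda')
# >>> compiled, = call([a, b])
# >>> torch.allclose(compiled, diou_loss(a, b, eps=1e-06), atol=1e-5)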
def diou(pred, target, eps=1e-07):
lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]
ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
union = ap + ag - overlap + eps
ious = overlap / union
enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
cw = enclose_wh[:, 0]
ch = enclose_wh[:, 1]
c2 = cw ** 2 + ch ** 2 + eps
b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
b2_x1, b2_y1 = target[:, 0], target[:, 1]
b2_x2, b2_y2 = target[:, 2], target[:, 3]
left = (b2_x1 + b2_x2 - (b1_x1 + b1_x2)) ** 2 / 4
right = (b2_y1 + b2_y2 - (b1_y1 + b1_y2)) ** 2 / 4
rho2 = left + right
dious = ious - rho2 / c2
return dious
def diou_loss(pred, target, eps=1e-07):
"""`Implementation of Distance-IoU Loss: Faster and Better
Learning for Bounding Box Regression, https://arxiv.org/abs/1911.08287`_.
Code is modified from https://github.com/Zzh-tju/DIoU.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): A small value added for numerical stability.
Return:
Tensor: Loss tensor.
"""
dious = diou(pred, target, eps)
loss = 1 - dious
return loss
class DIoULossNew(nn.Module):
def __init__(self, eps=1e-06):
super(DIoULossNew, self).__init__()
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| zhangzhengde0225/SwinTrack | DIoULoss | false | 16,798 | [
"MIT"
] | 143 | 526be17f8ef266cb924c6939bd8dda23e9b73249 | https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249 |
VarifocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/y5/cy5qcaqdeonhdf323xhgo6aes6lwgticqhkl6knwc2ilyxcdeqmw.py
# Topologically Sorted Source Nodes: [binary_cross_entropy, pred_sigmoid, gt, float_1, mul, sub, abs_1, pow_1, mul_1, le, float_2, mul_2, focal_weight, loss], Original ATen: [aten.binary_cross_entropy, aten.sigmoid, aten.gt, aten._to_copy, aten.mul, aten.sub, aten.abs, aten.pow, aten.le, aten.add]
# Source node to ATen node mapping:
# abs_1 => abs_1
# binary_cross_entropy => full_default, full_default_1, log, log1p, maximum, maximum_1, mul_3, mul_4, neg, sub_1, sub_2
# float_1 => convert_element_type
# float_2 => convert_element_type_1
# focal_weight => add
# gt => gt
# le => le
# loss => mul_5
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pow_1 => pow_1
# pred_sigmoid => sigmoid
# sub => sub
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, 1), kwargs = {})
# %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sigmoid,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %maximum), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sigmoid,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %maximum_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %mul_4), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg1_1, 0.0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt, torch.float32), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %convert_element_type), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.75), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%arg1_1, 0.0), kwargs = {})
# %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%le, torch.float32), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %convert_element_type_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_2), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %add), kwargs = {})
triton_poi_fused__to_copy_abs_add_binary_cross_entropy_gt_le_mul_pow_sigmoid_sub_0 = async_compile.triton('triton_poi_fused__to_copy_abs_add_binary_cross_entropy_gt_le_mul_pow_sigmoid_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_abs_add_binary_cross_entropy_gt_le_mul_pow_sigmoid_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_abs_add_binary_cross_entropy_gt_le_mul_pow_sigmoid_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = -tmp4
tmp6 = libdevice.log1p(tmp5)
tmp7 = -100.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp2 * tmp8
tmp10 = tl_math.log(tmp4)
tmp11 = triton_helpers.maximum(tmp10, tmp7)
tmp12 = tmp0 * tmp11
tmp13 = tmp9 - tmp12
tmp14 = 0.0
tmp15 = tmp0 > tmp14
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp0 * tmp16
tmp18 = tmp4 - tmp0
tmp19 = tl_math.abs(tmp18)
tmp20 = tmp19 * tmp19
tmp21 = 0.75
tmp22 = tmp20 * tmp21
tmp23 = tmp0 <= tmp14
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp22 * tmp24
tmp26 = tmp17 + tmp25
tmp27 = tmp13 * tmp26
tl.store(out_ptr0 + (x0), tmp27, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [binary_cross_entropy, pred_sigmoid, gt, float_1, mul, sub, abs_1, pow_1, mul_1, le, float_2, mul_2, focal_weight, loss], Original ATen: [aten.binary_cross_entropy, aten.sigmoid, aten.gt, aten._to_copy, aten.mul, aten.sub, aten.abs, aten.pow, aten.le, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_abs_add_binary_cross_entropy_gt_le_mul_pow_sigmoid_sub_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
def varifocal_loss(pred, target, alpha=0.75, gamma=2.0, iou_weighted=True,
use_sigmoid=True):
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the
number of classes
target (torch.Tensor): The learning target of the iou-aware
classification score with shape (N, C), C is the number of classes.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal Loss.
Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive example with the iou target. Defaults to True.
use_sigmoid (bool, optional): Whether the prediction is
used for sigmoid or softmax. Defaults to True.
"""
assert pred.size() == target.size()
if use_sigmoid:
pred_sigmoid = pred.sigmoid()
else:
pred_sigmoid = pred
target = target.type_as(pred)
if iou_weighted:
focal_weight = target * (target > 0.0).float() + alpha * (pred_sigmoid
- target).abs().pow(gamma) * (target <= 0.0).float()
else:
focal_weight = (target > 0.0).float() + alpha * (pred_sigmoid - target
).abs().pow(gamma) * (target <= 0.0).float()
loss = F.binary_cross_entropy(pred_sigmoid, target, reduction='none'
) * focal_weight
return loss
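# A hedged sketch (hypothetical logits/targets): positive cells (target > 0)
# have their BCE term weighted by the IoU target; negatives are down-weighted
# by alpha * |p - target|^gamma.
# >>> pred = torch.zeros(1, 2)              # logits, so sigmoid(0) = 0.5
# >>> target = torch.tensor([[0.9, 0.0]])   # one positive (IoU 0.9), one negative
# >>> varifocal_loss(pred, target)          # per-element loss, shape (1, 2)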
class VarifocalLoss(nn.Module):
def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0,
iou_weighted=True):
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
use_sigmoid (bool, optional): Whether the prediction is
used for sigmoid or softmax. Defaults to True.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal
Loss. Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive examples with the iou target. Defaults to True.
"""
super(VarifocalLoss, self).__init__()
assert alpha >= 0.0
self.use_sigmoid = use_sigmoid
self.alpha = alpha
self.gamma = gamma
self.iou_weighted = iou_weighted
def forward(self, pred, target):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
Returns:
torch.Tensor: The calculated loss
"""
return varifocal_loss(pred, target, alpha=self.alpha, gamma=self.
gamma, iou_weighted=self.iou_weighted, use_sigmoid=self.use_sigmoid
)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_abs_add_binary_cross_entropy_gt_le_mul_pow_sigmoid_sub_0(
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = -tmp4
tmp6 = libdevice.log1p(tmp5)
tmp7 = -100.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp2 * tmp8
tmp10 = tl_math.log(tmp4)
tmp11 = triton_helpers.maximum(tmp10, tmp7)
tmp12 = tmp0 * tmp11
tmp13 = tmp9 - tmp12
tmp14 = 0.0
tmp15 = tmp0 > tmp14
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp0 * tmp16
tmp18 = tmp4 - tmp0
tmp19 = tl_math.abs(tmp18)
tmp20 = tmp19 * tmp19
tmp21 = 0.75
tmp22 = tmp20 * tmp21
tmp23 = tmp0 <= tmp14
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp22 * tmp24
tmp26 = tmp17 + tmp25
tmp27 = tmp13 * tmp26
tl.store(out_ptr0 + x0, tmp27, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_abs_add_binary_cross_entropy_gt_le_mul_pow_sigmoid_sub_0[
grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del arg0_1
del arg1_1
return buf0,
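# A hedged sanity check (assumes CUDA; inputs are hypothetical): the fused
# kernel bakes in alpha=0.75, gamma=2.0, iou_weighted=True, use_sigmoid=True,
# so it should match the eager varifocal_loss defined below at its defaults.
# >>> a = torch.rand(4, 4, 4, 4, device='cuda')   # logits
# >>> b = torch.rand(4, 4, 4, 4, device='cuda')   # iou-aware targets
# >>> compiled, = call([a, b])
# >>> torch.allclose(compiled, varifocal_loss(a, b), atol=1e-5)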
def varifocal_loss(pred, target, alpha=0.75, gamma=2.0, iou_weighted=True,
use_sigmoid=True):
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the
number of classes
target (torch.Tensor): The learning target of the iou-aware
classification score with shape (N, C), C is the number of classes.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal Loss.
Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive example with the iou target. Defaults to True.
use_sigmoid (bool, optional): Whether the prediction is
used for sigmoid or softmax. Defaults to True.
"""
assert pred.size() == target.size()
if use_sigmoid:
pred_sigmoid = pred.sigmoid()
else:
pred_sigmoid = pred
target = target.type_as(pred)
if iou_weighted:
focal_weight = target * (target > 0.0).float() + alpha * (pred_sigmoid
- target).abs().pow(gamma) * (target <= 0.0).float()
else:
focal_weight = (target > 0.0).float() + alpha * (pred_sigmoid - target
).abs().pow(gamma) * (target <= 0.0).float()
loss = F.binary_cross_entropy(pred_sigmoid, target, reduction='none'
) * focal_weight
return loss
class VarifocalLossNew(nn.Module):
def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0,
iou_weighted=True):
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
use_sigmoid (bool, optional): Whether the prediction is
used for sigmoid or softmax. Defaults to True.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal
Loss. Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive examples with the iou target. Defaults to True.
"""
super(VarifocalLossNew, self).__init__()
assert alpha >= 0.0
self.use_sigmoid = use_sigmoid
self.alpha = alpha
self.gamma = gamma
self.iou_weighted = iou_weighted
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| zhangzhengde0225/SwinTrack | VarifocalLoss | false | 16,799 | [
"MIT"
] | 143 | 526be17f8ef266cb924c6939bd8dda23e9b73249 | https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249 |
CXLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/lo/cloo5jnc2uictteup22yoiirrdf5qm4ntgk52ngohgqkfjebcfid.py
# Topologically Sorted Source Nodes: [mean, mean_1], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# mean_1 => mean_1
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0], True), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [2], True), kwargs = {})
triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0 + (16*x1)), xmask)
tmp9 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp10 = tl.load(in_ptr0 + (68 + x0 + (16*x1)), xmask)
tmp12 = tl.load(in_ptr0 + (132 + x0 + (16*x1)), xmask)
tmp14 = tl.load(in_ptr0 + (196 + x0 + (16*x1)), xmask)
tmp18 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp19 = tl.load(in_ptr0 + (72 + x0 + (16*x1)), xmask)
tmp21 = tl.load(in_ptr0 + (136 + x0 + (16*x1)), xmask)
tmp23 = tl.load(in_ptr0 + (200 + x0 + (16*x1)), xmask)
tmp27 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp28 = tl.load(in_ptr0 + (76 + x0 + (16*x1)), xmask)
tmp30 = tl.load(in_ptr0 + (140 + x0 + (16*x1)), xmask)
tmp32 = tl.load(in_ptr0 + (204 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = tmp26 + tmp34
tmp36 = tmp35 / tmp7
tl.store(out_ptr0 + (x2), tmp36, xmask)
''', device_str='cuda')
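# Editorial note (a sketch of what the kernel above fuses): for a
# (4, 4, 4, 4) input it computes the two-stage mean in one pass -- summing
# the 4 batch elements per dim-2 position, dividing by 4, then averaging the
# four dim-2 results -- matching mean(dim=0, keepdim=True).mean(dim=2,
# keepdim=True).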
# kernel path: runs/run_shard_0/inductor_cache/is/cis6s5vdzevpizfdakaf7ojmde6xth52c3hqviohqts6qoly7lsa.py
# Topologically Sorted Source Nodes: [meanT, featureI, featureT], Original ATen: [aten.mean, aten.sub]
# Source node to ATen node mapping:
# featureI => sub
# featureT => sub_1
# meanT => mean_2
# Graph fragment:
# %mean_2 : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%mean_1, [3], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %mean_2), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean_2), kwargs = {})
triton_poi_fused_mean_sub_1 = async_compile.triton('triton_poi_fused_mean_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (x3), xmask)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp12 = tmp11 - tmp9
tl.store(out_ptr0 + (x3), tmp10, xmask)
tl.store(out_ptr1 + (x3), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wp/cwpinrdok6m4mv6ctokitero7thhi4yok2aezvojljzsq4bcorjl.py
# Topologically Sorted Source Nodes: [norms, features], Original ATen: [aten.linalg_vector_norm, aten.div]
# Source node to ATen node mapping:
# features => div
# norms => pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %div : [num_users=4] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %pow_2), kwargs = {})
triton_poi_fused_div_linalg_vector_norm_2 = async_compile.triton('triton_poi_fused_div_linalg_vector_norm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_linalg_vector_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + (x3), tmp13, xmask)
''', device_str='cuda')
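# Editor's note: the fused kernel above implements the channel-wise L2
# normalization from CXLoss.l2_normalize_channelwise for this fixed
# (4, 4, 4, 4) problem size: each element is divided by the L2 norm of
# the four channel entries sharing its (n, h, w) position. The sketch
# below (a hypothetical helper, not part of the generated module) shows
# the same math in eager mode; like the source's features.div(norms),
# it has no zero-norm guard.
def _sketch_l2_normalize_channelwise(x):
    # x: NCHW tensor; normalize over the channel dimension.
    norms = x.pow(2).sum(dim=1, keepdim=True).sqrt()  # matches tmp12 (sqrt of sum of squares)
    return x / norms                                  # matches tmp13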
# kernel path: runs/run_shard_0/inductor_cache/nq/cnq6srggz57i3h3j2lxnqujaitqfxi7isiq46xzevqxrspuo7dsy.py
# Topologically Sorted Source Nodes: [dist_i], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# dist_i => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_1, %permute, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (x1 + (16*y0)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (4*x1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/bb/cbb2iqqfdkdo6ada5wsgbzsi6ay5vabbdmlzhc6672n5354uc6w7.py
# Topologically Sorted Source Nodes: [dist_i], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# dist_i => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_1, %permute, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
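# Editor's note: the two transpose kernels above stage the operands for
# the F.conv2d call in CXLoss.forward. triton_poi_fused_convolution_3
# repacks the first normalized featureI sample into a channels-last
# (1, C, H, W) buffer, while triton_poi_fused_convolution_4 reshapes the
# matching featureT sample into P = H*W filters of shape (C, 1, 1),
# mirroring patch_decomposition in the source module. Because both
# operands are L2-normalized over channels, the 1x1 convolution yields
# the cosine similarity between every featureI position and every
# featureT patch. A hypothetical eager-mode sketch of one iteration:
def _sketch_patch_similarity(featureI_i, featureT_i):
    import torch.nn.functional as F
    # featureI_i, featureT_i: (1, C, H, W), already L2-normalized over C.
    _N, C, H, W = featureT_i.shape
    patches = featureT_i.view(1, 1, C, H * W).permute(3, 2, 0, 1)  # (P, C, 1, 1)
    return F.conv2d(featureI_i, patches)  # (1, P, H, W) cosine similarities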
# kernel path: runs/run_shard_0/inductor_cache/7k/c7kl77e5pl4mw5ixqk2gvbtpbq2ugcx2hamxsei6xu5rrqtkcy5v.py
# Topologically Sorted Source Nodes: [dist_i_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# dist_i_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_3, %permute_1, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (64 + x1 + (16*y0)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (4*x1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/u6/cu64gw5ceqi55nkqljelny7pbcfm6qebyjs5cvw5shkquhlaemtu.py
# Topologically Sorted Source Nodes: [dist_i_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# dist_i_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_3, %permute_1, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_6 = async_compile.triton('triton_poi_fused_convolution_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (64 + y0 + (16*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/pb/cpbjmfszruv2bc7sxl2cwcgars5nyrzrv2ge43s32wyvhnq6vuin.py
# Topologically Sorted Source Nodes: [dist_i_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# dist_i_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_5, %permute_2, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_7 = async_compile.triton('triton_poi_fused_convolution_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (128 + x1 + (16*y0)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (4*x1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ig/cig2o2imtsl37frxcaabia5vdqodfxlxlsnx6v5bhih4ki4pscgw.py
# Topologically Sorted Source Nodes: [dist_i_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# dist_i_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_5, %permute_2, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_8 = async_compile.triton('triton_poi_fused_convolution_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (128 + y0 + (16*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vr/cvr4ld5wxk2ablwhxo4cegott2mkb2gujf3awrztjq5vutcf735i.py
# Topologically Sorted Source Nodes: [dist_i_3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# dist_i_3 => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_7, %permute_3, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_9 = async_compile.triton('triton_poi_fused_convolution_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (192 + x1 + (16*y0)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (4*x1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/yl/cylobbelqpnmrkifdtbjvv3jpd4fifiuxyd2dd2zxcpqmgb3h66x.py
# Topologically Sorted Source Nodes: [dist_i_3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# dist_i_3 => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_7, %permute_3, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_10 = async_compile.triton('triton_poi_fused_convolution_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (192 + y0 + (16*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/mm/cmm6efjzcszd7zlh2xk4w44af26jmvevkg7amrrwo2dpupwdu4mm.py
# Topologically Sorted Source Nodes: [dist, sub_2, raw_dist, min_1, add, relative_dist, sub_3, truediv_2, W, W_sum], Original ATen: [aten.cat, aten.rsub, aten.div, aten.min, aten.add, aten.exp, aten.sum]
# Source node to ATen node mapping:
# W => exp
# W_sum => sum_3
# add => add
# dist => cat
# min_1 => min_1
# raw_dist => div_2
# relative_dist => div_3
# sub_2 => sub_2
# sub_3 => sub_3
# truediv_2 => div_4
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution, %convolution_1, %convolution_2, %convolution_3],), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %cat), kwargs = {})
# %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, 2.0), kwargs = {})
# %min_1 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%div_2, 1, True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_2, %add), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div_3), kwargs = {})
# %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_3, 0.1), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_4,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_per_fused_add_cat_div_exp_min_rsub_sum_11 = async_compile.triton('triton_per_fused_add_cat_div_exp_min_rsub_sum_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_cat_div_exp_min_rsub_sum_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_cat_div_exp_min_rsub_sum_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
x1 = (xindex // 16)
r2 = rindex
x0 = xindex % 16
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (r2 + (16*x0)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1, 1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (r2 + (16*x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1, 1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (r2 + (16*x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1, 1], 4, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr3 + (r2 + (16*x0)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tmp23 = 1.0
tmp24 = tmp23 - tmp22
tmp25 = 0.5
tmp26 = tmp24 * tmp25
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.where(xmask, tmp27, float("inf"))
tmp30 = triton_helpers.min2(tmp29, 1)[:, None]
tmp31 = 1e-05
tmp32 = tmp30 + tmp31
tmp33 = tmp26 / tmp32
tmp34 = tmp23 - tmp33
tmp35 = 10.0
tmp36 = tmp34 * tmp35
tmp37 = tl_math.exp(tmp36)
tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp40 = tl.where(xmask, tmp38, 0)
tmp41 = tl.sum(tmp40, 1)[:, None]
tl.store(out_ptr0 + (r2 + (16*x3)), tmp26, xmask)
tl.store(out_ptr1 + (x3), tmp30, xmask)
tl.store(out_ptr2 + (x3), tmp41, xmask)
''', device_str='cuda')
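# Editor's note: the persistent reduction above fuses several forward
# steps at once: it concatenates the four per-sample similarity maps
# (the tl.where chain over in_ptr0..in_ptr3), maps cosine similarity to
# a distance via raw_dist = (1 - dist) / 2 (tmp26), takes the minimum
# over the patch dimension (tmp30), and accumulates the normalizer
# W_sum, where W = exp((b - relative_dist) / sigma) with b = 1.0 and
# sigma = 0.1 — the source of the literal 10.0 multiplier (tmp35). The
# kernel stashes raw_dist, the minima, and W_sum for reuse downstream.
# A hypothetical eager-mode sketch of the same pipeline:
def _sketch_relative_dist_weights(dist, sigma=0.1, b=1.0, eps=1e-05):
    # dist: (N, P, H, W) cosine similarities in [-1, 1].
    raw_dist = (1.0 - dist) / 2.0
    relative_dist = raw_dist / (raw_dist.min(dim=1, keepdim=True)[0] + eps)
    W = torch.exp((b - relative_dist) / sigma)
    return W, W.sum(dim=1, keepdim=True)  # W_sum corresponds to out_ptr2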
# kernel path: runs/run_shard_0/inductor_cache/ps/cpscwcbti2xpw5pmevdfp57xu53ndczikjphh2vbjocpgx62qie6.py
# Topologically Sorted Source Nodes: [add, relative_dist, sub_3, truediv_2, W, CX, max_1], Original ATen: [aten.add, aten.div, aten.rsub, aten.exp, aten.max]
# Source node to ATen node mapping:
# CX => div_5
# W => exp
# add => add
# max_1 => max_1
# relative_dist => div_3
# sub_3 => sub_3
# truediv_2 => div_4
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_2, %add), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div_3), kwargs = {})
# %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_3, 0.1), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_4,), kwargs = {})
# %div_5 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_3), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div_5, 3), kwargs = {})
triton_poi_fused_add_div_exp_max_rsub_12 = async_compile.triton('triton_poi_fused_add_div_exp_max_rsub_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_exp_max_rsub_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_exp_max_rsub_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (4*x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp13 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr2 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp23 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp33 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr2 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = 1e-05
tmp3 = tmp1 + tmp2
tmp4 = tmp0 / tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = 10.0
tmp8 = tmp6 * tmp7
tmp9 = tl_math.exp(tmp8)
tmp11 = tmp9 / tmp10
tmp14 = tmp13 + tmp2
tmp15 = tmp12 / tmp14
tmp16 = tmp5 - tmp15
tmp17 = tmp16 * tmp7
tmp18 = tl_math.exp(tmp17)
tmp20 = tmp18 / tmp19
tmp21 = triton_helpers.maximum(tmp11, tmp20)
tmp24 = tmp23 + tmp2
tmp25 = tmp22 / tmp24
tmp26 = tmp5 - tmp25
tmp27 = tmp26 * tmp7
tmp28 = tl_math.exp(tmp27)
tmp30 = tmp28 / tmp29
tmp31 = triton_helpers.maximum(tmp21, tmp30)
tmp34 = tmp33 + tmp2
tmp35 = tmp32 / tmp34
tmp36 = tmp5 - tmp35
tmp37 = tmp36 * tmp7
tmp38 = tl_math.exp(tmp37)
tmp40 = tmp38 / tmp39
tmp41 = triton_helpers.maximum(tmp31, tmp40)
tl.store(out_ptr0 + (x2), tmp41, xmask)
''', device_str='cuda')
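# Editor's note: the kernel above recomputes W from the stashed raw
# distances and per-row minima, normalizes to CX = W / W_sum (calc_CX in
# the source module), and folds in the first reduction CX.max(dim=3)[0]
# as a running maximum over the four stride-16 loads. Hypothetical
# eager-mode sketch:
def _sketch_cx_max_w(W, W_sum):
    CX = W / W_sum            # (N, P, H, W); sums to 1 over the patch dim
    return CX.max(dim=3)[0]   # (N, P, H): best match along the last spatial dim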
# kernel path: runs/run_shard_0/inductor_cache/gt/cgtdcwqepvmdbwxfqkcxfii5zflfolepkl433efdrhcxtu3cfvqi.py
# Topologically Sorted Source Nodes: [max_2, CX_2], Original ATen: [aten.max, aten.mean]
# Source node to ATen node mapping:
# CX_2 => mean_3
# max_2 => max_2
# Graph fragment:
# %max_2 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%getitem_2, 2), kwargs = {})
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%getitem_4, [1]), kwargs = {})
triton_per_fused_max_mean_13 = async_compile.triton('triton_per_fused_max_mean_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_mean_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_max_mean_13(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (16 + r1 + (64*x0)), xmask, other=0.0)
tmp3 = tl.load(in_ptr0 + (32 + r1 + (64*x0)), xmask, other=0.0)
tmp5 = tl.load(in_ptr0 + (48 + r1 + (64*x0)), xmask, other=0.0)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vo/cvoirrcnpclvzs7z2r3mqk5m32yoeb26bj7zlsp4e3zc4m3ldoyf.py
# Topologically Sorted Source Nodes: [max_2, CX_2, log, CX_3, CX_4], Original ATen: [aten.max, aten.mean, aten.log, aten.neg]
# Source node to ATen node mapping:
# CX_2 => mean_3
# CX_3 => neg
# CX_4 => mean_4
# log => log
# max_2 => max_2
# Graph fragment:
# %max_2 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%getitem_2, 2), kwargs = {})
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%getitem_4, [1]), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%mean_3,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log,), kwargs = {})
# %mean_4 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%neg,), kwargs = {})
triton_per_fused_log_max_mean_neg_14 = async_compile.triton('triton_per_fused_log_max_mean_neg_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_log_max_mean_neg_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_log_max_mean_neg_14(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = 16.0
tmp2 = tmp0 / tmp1
tmp3 = tl_math.log(tmp2)
tmp4 = -tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.sum(tmp5, 1)[:, None]
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')
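# Editor's note: the last two kernels finish the reduction chain from
# CXLoss.forward. triton_per_fused_max_mean_13 applies CX.max(dim=2)[0]
# and sums over the patch dimension (the division for the mean is
# deferred: tmp10 is a plain sum), and triton_per_fused_log_max_mean_neg_14
# performs that deferred /16, then -log and the final mean over the
# batch of 4, yielding the scalar loss. Hypothetical eager-mode sketch
# of the tail, starting from the already max-reduced (N, P, H) tensor:
def _sketch_cx_loss_tail(CX):
    CX = CX.max(dim=2)[0]             # (N, P): best spatial match per patch
    CX = CX.mean(dim=1)               # (N,): average over P = 16 patches
    return (-torch.log(CX)).mean()    # scalar contextual loss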
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 1, 4), (16, 4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, mean_1], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [meanT, featureI, featureT], Original ATen: [aten.mean, aten.sub]
triton_poi_fused_mean_sub_1.run(arg1_1, buf0, arg0_1, buf1, buf3, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norms, features], Original ATen: [aten.linalg_vector_norm, aten.div]
triton_poi_fused_div_linalg_vector_norm_2.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf4 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [norms_1, features_1], Original ATen: [aten.linalg_vector_norm, aten.div]
triton_poi_fused_div_linalg_vector_norm_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf3
buf5 = empty_strided_cuda((1, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Topologically Sorted Source Nodes: [dist_i], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf2, buf5, 4, 16, grid=grid(4, 16), stream=stream0)
buf6 = empty_strided_cuda((16, 4, 1, 1), (4, 1, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [dist_i], Original ATen: [aten.convolution]
triton_poi_fused_convolution_4.run(buf4, buf6, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [dist_i], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf5, buf6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (1, 16, 4, 4), (256, 1, 64, 16))
buf8 = reinterpret_tensor(buf6, (1, 4, 4, 4), (64, 1, 16, 4), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [dist_i_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_5.run(buf2, buf8, 4, 16, grid=grid(4, 16), stream=stream0)
buf9 = reinterpret_tensor(buf5, (16, 4, 1, 1), (4, 1, 4, 4), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [dist_i_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf4, buf9, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [dist_i_1], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf8, buf9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (1, 16, 4, 4), (256, 1, 64, 16))
buf11 = reinterpret_tensor(buf9, (1, 4, 4, 4), (64, 1, 16, 4), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [dist_i_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_7.run(buf2, buf11, 4, 16, grid=grid(4, 16), stream=stream0)
buf12 = reinterpret_tensor(buf8, (16, 4, 1, 1), (4, 1, 4, 4), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [dist_i_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_8.run(buf4, buf12, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [dist_i_2], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf11, buf12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (1, 16, 4, 4), (256, 1, 64, 16))
buf14 = reinterpret_tensor(buf12, (1, 4, 4, 4), (64, 1, 16, 4), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [dist_i_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_9.run(buf2, buf14, 4, 16, grid=grid(4, 16), stream=stream0)
del buf2
buf15 = reinterpret_tensor(buf11, (16, 4, 1, 1), (4, 1, 4, 4), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [dist_i_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_10.run(buf4, buf15, 16, 4, grid=grid(16, 4), stream=stream0)
del buf4
# Topologically Sorted Source Nodes: [dist_i_3], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf14, buf15, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (1, 16, 4, 4), (256, 1, 64, 16))
buf17 = empty_strided_cuda((4, 16, 4, 4), (256, 1, 64, 16), torch.float32)
buf18 = reinterpret_tensor(buf15, (4, 1, 4, 4), (16, 64, 4, 1), 0); del buf15 # reuse
buf20 = reinterpret_tensor(buf14, (4, 1, 4, 4), (16, 64, 4, 1), 0); del buf14 # reuse
# Topologically Sorted Source Nodes: [dist, sub_2, raw_dist, min_1, add, relative_dist, sub_3, truediv_2, W, W_sum], Original ATen: [aten.cat, aten.rsub, aten.div, aten.min, aten.add, aten.exp, aten.sum]
triton_per_fused_add_cat_div_exp_min_rsub_sum_11.run(buf7, buf10, buf13, buf16, buf17, buf18, buf20, 64, 16, grid=grid(64), stream=stream0)
del buf10
del buf13
del buf16
buf21 = reinterpret_tensor(buf7, (4, 16, 4), (64, 1, 16), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [add, relative_dist, sub_3, truediv_2, W, CX, max_1], Original ATen: [aten.add, aten.div, aten.rsub, aten.exp, aten.max]
triton_poi_fused_add_div_exp_max_rsub_12.run(buf17, buf18, buf20, buf21, 256, grid=grid(256), stream=stream0)
del buf17
del buf18
del buf20
buf22 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [max_2, CX_2], Original ATen: [aten.max, aten.mean]
triton_per_fused_max_mean_13.run(buf21, buf22, 4, 16, grid=grid(4), stream=stream0)
del buf21
buf23 = empty_strided_cuda((), (), torch.float32)
buf24 = buf23; del buf23 # reuse
# Topologically Sorted Source Nodes: [max_2, CX_2, log, CX_3, CX_4], Original ATen: [aten.max, aten.mean, aten.log, aten.neg]
triton_per_fused_log_max_mean_neg_14.run(buf24, buf22, 1, 4, grid=grid(1), stream=stream0)
del buf22
return (buf24, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
class CXLoss(nn.Module):
    def __init__(self, sigma=0.1, b=1.0, similarity='cosine'):
super(CXLoss, self).__init__()
self.similarity = similarity
self.sigma = sigma
self.b = b
def center_by_T(self, featureI, featureT):
meanT = featureT.mean(0, keepdim=True).mean(2, keepdim=True).mean(3,
keepdim=True)
return featureI - meanT, featureT - meanT
def l2_normalize_channelwise(self, features):
norms = features.norm(p=2, dim=1, keepdim=True)
features = features.div(norms)
return features
def patch_decomposition(self, features):
N, C, H, W = features.shape
assert N == 1
P = H * W
patches = features.view(1, 1, C, P).permute((3, 2, 0, 1))
return patches
def calc_relative_distances(self, raw_dist, axis=1):
epsilon = 1e-05
div = torch.min(raw_dist, dim=axis, keepdim=True)[0]
relative_dist = raw_dist / (div + epsilon)
return relative_dist
def calc_CX(self, dist, axis=1):
W = torch.exp((self.b - dist) / self.sigma)
W_sum = W.sum(dim=axis, keepdim=True)
return W.div(W_sum)
def forward(self, featureT, featureI):
"""
        :param featureT: target feature map
        :param featureI: inference (generated) feature map
        :return: scalar contextual (CX) loss
"""
featureI, featureT = self.center_by_T(featureI, featureT)
featureI = self.l2_normalize_channelwise(featureI)
featureT = self.l2_normalize_channelwise(featureT)
dist = []
N = featureT.size()[0]
for i in range(N):
featureT_i = featureT[i, :, :, :].unsqueeze(0)
featureI_i = featureI[i, :, :, :].unsqueeze(0)
featureT_patch = self.patch_decomposition(featureT_i)
dist_i = F.conv2d(featureI_i, featureT_patch)
dist.append(dist_i)
dist = torch.cat(dist, dim=0)
raw_dist = (1.0 - dist) / 2.0
relative_dist = self.calc_relative_distances(raw_dist)
CX = self.calc_CX(relative_dist)
CX = CX.max(dim=3)[0].max(dim=2)[0]
CX = CX.mean(1)
CX = -torch.log(CX)
CX = torch.mean(CX)
return CX
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
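# Editor's note: a minimal usage sketch for the CXLoss module above
# (a hypothetical helper; shapes match get_inputs, and the argument
# order follows the forward docstring — target first, inference second):
def _sketch_cxloss_usage():
    criterion = CXLoss(sigma=0.1, b=1.0)
    featureT = torch.rand(4, 4, 4, 4)
    featureI = torch.rand(4, 4, 4, 4)
    return criterion(featureT, featureI)  # scalar loss tensor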
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0 + 16 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp10 = tl.load(in_ptr0 + (68 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (132 + x0 + 16 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (196 + x0 + 16 * x1), xmask)
tmp18 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp19 = tl.load(in_ptr0 + (72 + x0 + 16 * x1), xmask)
tmp21 = tl.load(in_ptr0 + (136 + x0 + 16 * x1), xmask)
tmp23 = tl.load(in_ptr0 + (200 + x0 + 16 * x1), xmask)
tmp27 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp28 = tl.load(in_ptr0 + (76 + x0 + 16 * x1), xmask)
tmp30 = tl.load(in_ptr0 + (140 + x0 + 16 * x1), xmask)
tmp32 = tl.load(in_ptr0 + (204 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = tmp26 + tmp34
tmp36 = tmp35 / tmp7
tl.store(out_ptr0 + x2, tmp36, xmask)
@triton.jit
def triton_poi_fused_mean_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + x3, xmask)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp12 = tmp11 - tmp9
tl.store(out_ptr0 + x3, tmp10, xmask)
tl.store(out_ptr1 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_2(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (x1 + 16 * y0), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_4(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_5(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (64 + x1 + 16 * y0), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_6(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (64 + y0 + 16 * x1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_7(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (128 + x1 + 16 * y0), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_8(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (128 + y0 + 16 * x1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_9(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (192 + x1 + 16 * y0), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_10(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (192 + y0 + 16 * x1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_per_fused_add_cat_div_exp_min_rsub_sum_11(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x1 = xindex // 16
r2 = rindex
x0 = xindex % 16
x3 = xindex
tmp0 = x1
tl.full([1, 1], 0, tl.int64)
tmp3 = tl.full([1, 1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (r2 + 16 * x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1, 1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (r2 + 16 * x0), tmp9 & xmask, eviction_policy
='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1, 1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (r2 + 16 * x0), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1, 1], 4, tl.int64)
tmp19 = tl.load(in_ptr3 + (r2 + 16 * x0), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tmp23 = 1.0
tmp24 = tmp23 - tmp22
tmp25 = 0.5
tmp26 = tmp24 * tmp25
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.where(xmask, tmp27, float('inf'))
tmp30 = triton_helpers.min2(tmp29, 1)[:, None]
tmp31 = 1e-05
tmp32 = tmp30 + tmp31
tmp33 = tmp26 / tmp32
tmp34 = tmp23 - tmp33
tmp35 = 10.0
tmp36 = tmp34 * tmp35
tmp37 = tl_math.exp(tmp36)
tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp40 = tl.where(xmask, tmp38, 0)
tmp41 = tl.sum(tmp40, 1)[:, None]
tl.store(out_ptr0 + (r2 + 16 * x3), tmp26, xmask)
tl.store(out_ptr1 + x3, tmp30, xmask)
tl.store(out_ptr2 + x3, tmp41, xmask)
@triton.jit
def triton_poi_fused_add_div_exp_max_rsub_12(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp23 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp33 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp39 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = 1e-05
tmp3 = tmp1 + tmp2
tmp4 = tmp0 / tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = 10.0
tmp8 = tmp6 * tmp7
tmp9 = tl_math.exp(tmp8)
tmp11 = tmp9 / tmp10
tmp14 = tmp13 + tmp2
tmp15 = tmp12 / tmp14
tmp16 = tmp5 - tmp15
tmp17 = tmp16 * tmp7
tmp18 = tl_math.exp(tmp17)
tmp20 = tmp18 / tmp19
tmp21 = triton_helpers.maximum(tmp11, tmp20)
tmp24 = tmp23 + tmp2
tmp25 = tmp22 / tmp24
tmp26 = tmp5 - tmp25
tmp27 = tmp26 * tmp7
tmp28 = tl_math.exp(tmp27)
tmp30 = tmp28 / tmp29
tmp31 = triton_helpers.maximum(tmp21, tmp30)
tmp34 = tmp33 + tmp2
tmp35 = tmp32 / tmp34
tmp36 = tmp5 - tmp35
tmp37 = tmp36 * tmp7
tmp38 = tl_math.exp(tmp37)
tmp40 = tmp38 / tmp39
tmp41 = triton_helpers.maximum(tmp31, tmp40)
tl.store(out_ptr0 + x2, tmp41, xmask)
@triton.jit
def triton_per_fused_max_mean_13(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0)
tmp3 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0)
tmp5 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_per_fused_log_max_mean_neg_14(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 16.0
tmp2 = tmp0 / tmp1
tmp3 = tl_math.log(tmp2)
tmp4 = -tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.sum(tmp5, 1)[:, None]
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp9, None)
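# call() consumes two (4, 4, 4, 4) CUDA tensors (see the assert_size_stride
# guards below) and returns a 1-tuple holding the scalar contextual loss
# accumulated by the kernels above.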
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 1, 4), (16, 4, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mean_sub_1[grid(256)](arg1_1, buf0, arg0_1, buf1,
buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_div_linalg_vector_norm_2[grid(256)](buf1, buf2,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = buf1
del buf1
triton_poi_fused_div_linalg_vector_norm_2[grid(256)](buf3, buf4,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
buf5 = empty_strided_cuda((1, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_convolution_3[grid(4, 16)](buf2, buf5, 4, 16,
XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((16, 4, 1, 1), (4, 1, 4, 4), torch.float32)
triton_poi_fused_convolution_4[grid(16, 4)](buf4, buf6, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf7 = extern_kernels.convolution(buf5, buf6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (1, 16, 4, 4), (256, 1, 64, 16))
buf8 = reinterpret_tensor(buf6, (1, 4, 4, 4), (64, 1, 16, 4), 0)
del buf6
triton_poi_fused_convolution_5[grid(4, 16)](buf2, buf8, 4, 16,
XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf5, (16, 4, 1, 1), (4, 1, 4, 4), 0)
del buf5
triton_poi_fused_convolution_6[grid(16, 4)](buf4, buf9, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf10 = extern_kernels.convolution(buf8, buf9, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (1, 16, 4, 4), (256, 1, 64, 16))
buf11 = reinterpret_tensor(buf9, (1, 4, 4, 4), (64, 1, 16, 4), 0)
del buf9
triton_poi_fused_convolution_7[grid(4, 16)](buf2, buf11, 4, 16,
XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf8, (16, 4, 1, 1), (4, 1, 4, 4), 0)
del buf8
triton_poi_fused_convolution_8[grid(16, 4)](buf4, buf12, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf13 = extern_kernels.convolution(buf11, buf12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (1, 16, 4, 4), (256, 1, 64, 16))
buf14 = reinterpret_tensor(buf12, (1, 4, 4, 4), (64, 1, 16, 4), 0)
del buf12
triton_poi_fused_convolution_9[grid(4, 16)](buf2, buf14, 4, 16,
XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1)
del buf2
buf15 = reinterpret_tensor(buf11, (16, 4, 1, 1), (4, 1, 4, 4), 0)
del buf11
triton_poi_fused_convolution_10[grid(16, 4)](buf4, buf15, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del buf4
buf16 = extern_kernels.convolution(buf14, buf15, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (1, 16, 4, 4), (256, 1, 64, 16))
buf17 = empty_strided_cuda((4, 16, 4, 4), (256, 1, 64, 16), torch.
float32)
buf18 = reinterpret_tensor(buf15, (4, 1, 4, 4), (16, 64, 4, 1), 0)
del buf15
buf20 = reinterpret_tensor(buf14, (4, 1, 4, 4), (16, 64, 4, 1), 0)
del buf14
triton_per_fused_add_cat_div_exp_min_rsub_sum_11[grid(64)](buf7,
buf10, buf13, buf16, buf17, buf18, buf20, 64, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf10
del buf13
del buf16
buf21 = reinterpret_tensor(buf7, (4, 16, 4), (64, 1, 16), 0)
del buf7
triton_poi_fused_add_div_exp_max_rsub_12[grid(256)](buf17, buf18,
buf20, buf21, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf17
del buf18
del buf20
buf22 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_max_mean_13[grid(4)](buf21, buf22, 4, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del buf21
buf23 = empty_strided_cuda((), (), torch.float32)
buf24 = buf23
del buf23
triton_per_fused_log_max_mean_neg_14[grid(1)](buf24, buf22, 1, 4,
XBLOCK=1, num_warps=2, num_stages=1)
del buf22
return buf24,
class CXLossNew(nn.Module):
def __init__(self, sigma=0.1, b=1.0, similarity='cosine'):
super(CXLossNew, self).__init__()
self.similarity = similarity
self.sigma = sigma
self.b = b
def center_by_T(self, featureI, featureT):
meanT = featureT.mean(0, keepdim=True).mean(2, keepdim=True).mean(3,
keepdim=True)
return featureI - meanT, featureT - meanT
def l2_normalize_channelwise(self, features):
norms = features.norm(p=2, dim=1, keepdim=True)
features = features.div(norms)
return features
def patch_decomposition(self, features):
N, C, H, W = features.shape
assert N == 1
P = H * W
patches = features.view(1, 1, C, P).permute((3, 2, 0, 1))
return patches
def calc_relative_distances(self, raw_dist, axis=1):
epsilon = 1e-05
div = torch.min(raw_dist, dim=axis, keepdim=True)[0]
relative_dist = raw_dist / (div + epsilon)
return relative_dist
def calc_CX(self, dist, axis=1):
W = torch.exp((self.b - dist) / self.sigma)
W_sum = W.sum(dim=axis, keepdim=True)
return W.div(W_sum)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
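# Minimal usage sketch (illustrative; _cx_loss_example is a hypothetical
# helper, not part of the generated module). The compiled path requires CUDA
# tensors of exactly the guarded shape (4, 4, 4, 4).
def _cx_loss_example():
    criterion = CXLossNew(sigma=0.1, b=1.0)
    feat_i = torch.rand(4, 4, 4, 4, device='cuda')
    feat_t = torch.rand(4, 4, 4, 4, device='cuda')
    return criterion(feat_i, feat_t)  # 0-dim loss tensor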
| yizhiwang96/deepvecfont | CXLoss | false | 16,800 | ["MIT"] | 68 | 3ba4adb0406f16a6f387c5e12dd12286c9c341e8 | https://github.com/yizhiwang96/deepvecfont/tree/3ba4adb0406f16a6f387c5e12dd12286c9c341e8 |
MaskedMHCA | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/u5/cu5pjk6p5h3lbz7erkdrea6evciintej63fiv4a3ekev4m64i5ou.py
# Topologically Sorted Source Nodes: [out_conv_1, mu, res_x, pow_1, sigma, out_conv_3, mu_1, res_x_1, pow_2, sigma_1, out_conv_5, mu_2, res_x_2, pow_3, sigma_2], Original ATen: [aten.mul, aten.mean, aten.sub, aten.pow]
# Source node to ATen node mapping:
# mu => mean
# mu_1 => mean_2
# mu_2 => mean_4
# out_conv_1 => mul
# out_conv_3 => mul_2
# out_conv_5 => mul_4
# pow_1 => pow_1
# pow_2 => pow_2
# pow_3 => pow_3
# res_x => sub
# res_x_1 => sub_1
# res_x_2 => sub_2
# sigma => mean_1
# sigma_1 => mean_3
# sigma_2 => mean_5
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %primals_3), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mean), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [1], True), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, %primals_3), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul_2, [1], True), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %mean_2), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_2, [1], True), kwargs = {})
# %mul_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, %primals_3), kwargs = {})
# %mean_4 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul_4, [1], True), kwargs = {})
# %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_4, %mean_4), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %mean_5 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_3, [1], True), kwargs = {})
triton_poi_fused_mean_mul_pow_sub_0 = async_compile.triton('triton_poi_fused_mean_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_mul_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_mul_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x0 + (16*x1)), xmask)
tmp7 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x0 + (16*x1)), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp12 = tl.load(in_ptr1 + (12 + x0 + (16*x1)), xmask)
tmp29 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask)
tmp31 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask)
tmp34 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask)
tmp37 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask)
tmp53 = tl.load(in_ptr3 + (x0 + (16*x1)), xmask)
tmp55 = tl.load(in_ptr3 + (4 + x0 + (16*x1)), xmask)
tmp58 = tl.load(in_ptr3 + (8 + x0 + (16*x1)), xmask)
tmp61 = tl.load(in_ptr3 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tmp30 = tmp29 * tmp1
tmp32 = tmp31 * tmp4
tmp33 = tmp30 + tmp32
tmp35 = tmp34 * tmp8
tmp36 = tmp33 + tmp35
tmp38 = tmp37 * tmp12
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp15
tmp41 = tmp30 - tmp40
tmp42 = tmp41 * tmp41
tmp43 = tmp32 - tmp40
tmp44 = tmp43 * tmp43
tmp45 = tmp42 + tmp44
tmp46 = tmp35 - tmp40
tmp47 = tmp46 * tmp46
tmp48 = tmp45 + tmp47
tmp49 = tmp38 - tmp40
tmp50 = tmp49 * tmp49
tmp51 = tmp48 + tmp50
tmp52 = tmp51 / tmp15
tmp54 = tmp53 * tmp1
tmp56 = tmp55 * tmp4
tmp57 = tmp54 + tmp56
tmp59 = tmp58 * tmp8
tmp60 = tmp57 + tmp59
tmp62 = tmp61 * tmp12
tmp63 = tmp60 + tmp62
tmp64 = tmp63 / tmp15
tmp65 = tmp54 - tmp64
tmp66 = tmp65 * tmp65
tmp67 = tmp56 - tmp64
tmp68 = tmp67 * tmp67
tmp69 = tmp66 + tmp68
tmp70 = tmp59 - tmp64
tmp71 = tmp70 * tmp70
tmp72 = tmp69 + tmp71
tmp73 = tmp62 - tmp64
tmp74 = tmp73 * tmp73
tmp75 = tmp72 + tmp74
tmp76 = tmp75 / tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp28, xmask)
tl.store(out_ptr2 + (x2), tmp40, xmask)
tl.store(out_ptr3 + (x2), tmp52, xmask)
tl.store(out_ptr4 + (x2), tmp64, xmask)
tl.store(out_ptr5 + (x2), tmp76, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4m/c4mxaygieaqmw3cjrfoftt72kpphuhwi2m54f4zi2hxkpegfquld.py
# Topologically Sorted Source Nodes: [out_conv_1, out_mask_1, mu, res_x, add, sqrt, out, out_1, out_2, out_conv_3, mu_1, res_x_1, add_1, sqrt_1, out_3, out_4, out_5, out_conv_5, mu_2, res_x_2, add_2, sqrt_2, out_6, out_7, out_8], Original ATen: [aten.mul, aten._to_copy, aten.mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_2
# add_2 => add_4
# mu => mean
# mu_1 => mean_2
# mu_2 => mean_4
# out => div
# out_1 => mul_1
# out_2 => add_1
# out_3 => div_1
# out_4 => mul_3
# out_5 => add_3
# out_6 => div_2
# out_7 => mul_5
# out_8 => add_5
# out_conv_1 => mul
# out_conv_3 => mul_2
# out_conv_5 => mul_4
# out_mask_1 => convert_element_type
# res_x => sub
# res_x_1 => sub_1
# res_x_2 => sub_2
# sqrt => sqrt
# sqrt_1 => sqrt_1
# sqrt_2 => sqrt_2
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %primals_3), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%primals_3, torch.bool), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mean), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-05), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_4), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_5), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, %primals_3), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul_2, [1], True), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %mean_2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_3, 1e-05), kwargs = {})
# %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_2,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %sqrt_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %primals_7), kwargs = {})
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_8), kwargs = {})
# %mul_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, %primals_3), kwargs = {})
# %mean_4 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul_4, [1], True), kwargs = {})
# %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_4, %mean_4), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_5, 1e-05), kwargs = {})
# %sqrt_2 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_4,), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, %sqrt_2), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %primals_10), kwargs = {})
# %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %primals_11), kwargs = {})
triton_poi_fused__to_copy_add_div_mean_mul_sqrt_sub_1 = async_compile.triton('triton_poi_fused__to_copy_add_div_mean_mul_sqrt_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*i1', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_div_mean_mul_sqrt_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_div_mean_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x1 = xindex % 4
x3 = (xindex // 16)
x2 = (xindex // 4) % 4
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp4 = tl.load(in_ptr2 + (x1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + (x1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + (x2), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr6 + (x0), xmask)
tmp17 = tl.load(in_ptr7 + (x1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr8 + (x1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr9 + (x2), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr10 + (x2), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr11 + (x0), xmask)
tmp29 = tl.load(in_ptr12 + (x1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr13 + (x1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr14 + (x2), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr15 + (x2), xmask, eviction_policy='evict_last')
tmp1 = (tmp0 != 0)
tmp3 = tmp2 * tmp0
tmp5 = tmp3 - tmp4
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = tmp5 / tmp9
tmp12 = tmp10 * tmp11
tmp14 = tmp12 + tmp13
tmp16 = tmp15 * tmp0
tmp18 = tmp16 - tmp17
tmp20 = tmp19 + tmp7
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tmp18 / tmp21
tmp24 = tmp22 * tmp23
tmp26 = tmp24 + tmp25
tmp28 = tmp27 * tmp0
tmp30 = tmp28 - tmp29
tmp32 = tmp31 + tmp7
tmp33 = libdevice.sqrt(tmp32)
tmp34 = tmp30 / tmp33
tmp36 = tmp34 * tmp35
tmp38 = tmp36 + tmp37
tl.store(out_ptr0 + (x0), tmp1, xmask)
tl.store(out_ptr1 + (x0), tmp14, xmask)
tl.store(out_ptr2 + (x0), tmp26, xmask)
tl.store(out_ptr3 + (x0), tmp38, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fz/cfze62tmf6jlzwq2qwbkiaurhvygtjeljcevjdsxqh265dlff77v.py
# Topologically Sorted Source Nodes: [mul_3], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul_3 => mul_6
# Graph fragment:
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_1, 1.0), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/br/cbrspj634eg2bbs7vljhmrm74hrxshk4iwow2n6gl43bcvltbocl.py
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# k => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_3, %primals_14, %primals_15, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/iz/cizcxhi4gu22gg6yokh2dw3lkjtdxi5ndguiyyncnup2o3duc33a.py
# Topologically Sorted Source Nodes: [logical_not, att_1, att_2], Original ATen: [aten.logical_not, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# att_1 => full_default, where
# att_2 => amax, exp, sub_3, sum_1
# logical_not => logical_not
# Graph fragment:
# %logical_not : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%unsqueeze,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not, %full_default, %view_5), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_logical_not_masked_fill_4 = async_compile.triton('triton_poi_fused__softmax_logical_not_masked_fill_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_logical_not_masked_fill_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_logical_not_masked_fill_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp1 = (tmp0 != 0)
tmp2 = tmp1 == 0
tmp4 = float("-inf")
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = (tmp6 != 0)
tmp8 = tmp7 == 0
tmp10 = tl.where(tmp8, tmp4, tmp9)
tmp11 = triton_helpers.maximum(tmp5, tmp10)
tmp13 = (tmp12 != 0)
tmp14 = tmp13 == 0
tmp16 = tl.where(tmp14, tmp4, tmp15)
tmp17 = triton_helpers.maximum(tmp11, tmp16)
tmp19 = (tmp18 != 0)
tmp20 = tmp19 == 0
tmp22 = tl.where(tmp20, tmp4, tmp21)
tmp23 = triton_helpers.maximum(tmp17, tmp22)
tmp24 = tmp5 - tmp23
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp10 - tmp23
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp16 - tmp23
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tmp32 = tmp22 - tmp23
tmp33 = tl_math.exp(tmp32)
tmp34 = tmp31 + tmp33
tl.store(out_ptr0 + (x2), tmp23, xmask)
tl.store(out_ptr1 + (x2), tmp34, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ts/ctsaeofj4fmabedbtsjwx5qktf2xbnay5oplfrhqkujdefswdrrc.py
# Topologically Sorted Source Nodes: [logical_not, att_1, att_2], Original ATen: [aten.logical_not, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# att_1 => full_default, where
# att_2 => amax, div_3, exp, sub_3
# logical_not => logical_not
# Graph fragment:
# %logical_not : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%unsqueeze,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not, %full_default, %view_5), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
# %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_logical_not_masked_fill_5 = async_compile.triton('triton_poi_fused__softmax_logical_not_masked_fill_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_logical_not_masked_fill_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_logical_not_masked_fill_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x3 = xindex
x4 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr0 + (x3), xmask)
tmp6 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp1 = (tmp0 != 0)
tmp2 = tmp1 == 0
tmp4 = float("-inf")
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/jd/cjdu5k276mjm24qtisxjzsj6riwbgpdl36vlm3p3yat3fjew2abk.py
# Topologically Sorted Source Nodes: [float_4, mul_4], Original ATen: [aten._to_copy, aten.mul]
# Source node to ATen node mapping:
# float_4 => convert_element_type_3
# mul_4 => mul_7
# Graph fragment:
# %convert_element_type_3 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%unsqueeze_1, torch.float32), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_2, %convert_element_type_3), kwargs = {})
triton_poi_fused__to_copy_mul_6 = async_compile.triton('triton_poi_fused__to_copy_mul_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_mul_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_mul_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = (tmp3 != 0)
tmp5 = tmp4.to(tl.float32)
tmp6 = tmp2 * tmp5
tl.store(in_out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/z6/cz6lr2cndudnvnvwkehucmozouvwr3xi2qzvdezeb36at4qukscj.py
# Topologically Sorted Source Nodes: [conv1d_6, float_5, out_11], Original ATen: [aten.convolution, aten._to_copy, aten.mul]
# Source node to ATen node mapping:
# conv1d_6 => convolution_6
# float_5 => convert_element_type_4
# out_11 => mul_8
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_9, %primals_18, %primals_19, [1], [0], [1], False, [0], 1), kwargs = {})
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%convert_element_type, torch.float32), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_6, %convert_element_type_4), kwargs = {})
triton_poi_fused__to_copy_convolution_mul_7 = async_compile.triton('triton_poi_fused__to_copy_convolution_mul_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_convolution_mul_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_convolution_mul_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask).to(tl.int1)
tmp2 = tmp0 + tmp1
tmp4 = tmp3.to(tl.float32)
tmp5 = tmp2 * tmp4
tl.store(in_out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1, 3), (3, 3, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_6, (4, 1, 3), (3, 3, 1))
assert_size_stride(primals_7, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_8, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_9, (4, 1, 3), (3, 3, 1))
assert_size_stride(primals_10, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_11, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_12, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_15, (4, ), (1, ))
assert_size_stride(primals_16, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_17, (4, ), (1, ))
assert_size_stride(primals_18, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_19, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out_conv], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
# Topologically Sorted Source Nodes: [out_conv_2], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(primals_1, primals_6, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=4, bias=None)
assert_size_stride(buf5, (4, 4, 4), (16, 4, 1))
# Topologically Sorted Source Nodes: [out_conv_4], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(primals_1, primals_9, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=4, bias=None)
assert_size_stride(buf9, (4, 4, 4), (16, 4, 1))
buf2 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf3 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf7 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf10 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf11 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_conv_1, mu, res_x, pow_1, sigma, out_conv_3, mu_1, res_x_1, pow_2, sigma_1, out_conv_5, mu_2, res_x_2, pow_3, sigma_2], Original ATen: [aten.mul, aten.mean, aten.sub, aten.pow]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_mul_pow_sub_0.run(buf0, primals_3, buf5, buf9, buf2, buf3, buf6, buf7, buf10, buf11, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_conv_1, out_mask_1, mu, res_x, add, sqrt, out, out_1, out_2, out_conv_3, mu_1, res_x_1, add_1, sqrt_1, out_3, out_4, out_5, out_conv_5, mu_2, res_x_2, add_2, sqrt_2, out_6, out_7, out_8], Original ATen: [aten.mul, aten._to_copy, aten.mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_poi_fused__to_copy_add_div_mean_mul_sqrt_sub_1.run(primals_3, buf0, buf2, buf3, primals_4, primals_5, buf5, buf6, buf7, primals_7, primals_8, buf9, buf10, buf11, primals_10, primals_11, buf1, buf4, buf8, buf12, 64, grid=grid(64), stream=stream0)
del buf10
del buf11
del buf2
del buf3
del buf6
del buf7
del primals_11
del primals_5
del primals_8
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf4, primals_12, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf13, (4, 4, 4), (16, 4, 1))
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf8, primals_14, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf14, (4, 4, 4), (16, 4, 1))
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(buf12, primals_16, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf15, (4, 4, 4), (16, 4, 1))
buf16 = reinterpret_tensor(buf13, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf13 # reuse
# Topologically Sorted Source Nodes: [mul_3], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(buf16, primals_13, 64, grid=grid(64), stream=stream0)
del primals_13
buf17 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf17, primals_15, 64, grid=grid(64), stream=stream0)
del primals_15
buf18 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [att], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf16, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf17, (16, 1, 4), (4, 0, 1), 0), out=buf18)
buf19 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf20 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [logical_not, att_1, att_2], Original ATen: [aten.logical_not, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_logical_not_masked_fill_4.run(primals_3, buf18, buf19, buf20, 64, grid=grid(64), stream=stream0)
buf21 = reinterpret_tensor(buf18, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [logical_not, att_1, att_2], Original ATen: [aten.logical_not, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_logical_not_masked_fill_5.run(buf21, primals_3, buf19, buf20, 256, grid=grid(256), stream=stream0)
del buf19
buf22 = reinterpret_tensor(buf15, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf15 # reuse
# Topologically Sorted Source Nodes: [float_4, mul_4], Original ATen: [aten._to_copy, aten.mul]
triton_poi_fused__to_copy_mul_6.run(buf22, primals_17, primals_3, 64, grid=grid(64), stream=stream0)
del primals_17
buf23 = reinterpret_tensor(buf20, (16, 4, 1), (4, 1, 1), 0); del buf20 # reuse
# Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf21, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf22, (16, 4, 1), (4, 1, 0), 0), out=buf23)
# Topologically Sorted Source Nodes: [conv1d_6], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(reinterpret_tensor(buf23, (4, 4, 4), (16, 4, 1), 0), primals_18, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf24, (4, 4, 4), (16, 4, 1))
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [conv1d_6, float_5, out_11], Original ATen: [aten.convolution, aten._to_copy, aten.mul]
triton_poi_fused__to_copy_convolution_mul_7.run(buf25, primals_19, buf1, 64, grid=grid(64), stream=stream0)
del primals_19
return (buf25, buf1, primals_1, primals_2, primals_3, primals_4, primals_6, primals_7, primals_9, primals_10, primals_12, primals_14, primals_16, primals_18, buf0, buf1, buf4, buf5, buf8, buf9, buf12, buf21, reinterpret_tensor(buf23, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf22, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf16, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf17, (16, 4, 1), (4, 1, 4), 0), )
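# In the tuple above, buf25 and buf1 are the module's two outputs (the
# projected attention features and the boolean query mask); the remaining
# tensors are presumably intermediates retained for the backward graph.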
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1, 3), (3, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 1, 3), (3, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 1, 3), (3, 3, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.utils.data
from torch.nn import functional as F
class LayerNorm(nn.Module):
"""
LayerNorm that supports inputs of size B, C, T
"""
def __init__(self, num_channels, eps=1e-05, affine=True, device=None,
dtype=None):
super().__init__()
factory_kwargs = {'device': device, 'dtype': dtype}
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = nn.Parameter(torch.ones([1, num_channels, 1], **
factory_kwargs))
self.bias = nn.Parameter(torch.zeros([1, num_channels, 1], **
factory_kwargs))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
def forward(self, x):
assert x.dim() == 3
assert x.shape[1] == self.num_channels
mu = torch.mean(x, dim=1, keepdim=True)
res_x = x - mu
sigma = torch.mean(res_x ** 2, dim=1, keepdim=True)
out = res_x / torch.sqrt(sigma + self.eps)
if self.affine:
out *= self.weight
out += self.bias
return out
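# Quick numerical check (illustrative; _layer_norm_check is a hypothetical
# helper): after normalization, every (B, T) location should have roughly
# zero mean and unit variance across the channel dimension.
def _layer_norm_check():
    x = torch.randn(2, 8, 5)
    y = LayerNorm(8, affine=False)(x)
    assert y.mean(dim=1).abs().max() < 1e-05
    assert (y.var(dim=1, unbiased=False) - 1).abs().max() < 1e-03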
class MaskedConv1D(nn.Module):
"""
Masked 1D convolution. Interface remains the same as Conv1d.
Only supports a subset of 1D convs.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'):
super().__init__()
assert kernel_size % 2 == 1 and kernel_size // 2 == padding
self.stride = stride
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias, padding_mode)
if bias:
torch.nn.init.constant_(self.conv.bias, 0.0)
def forward(self, x, mask):
_B, _C, T = x.size()
assert T % self.stride == 0
out_conv = self.conv(x)
if self.stride > 1:
out_mask = F.interpolate(mask.float(), size=T // self.stride,
mode='nearest')
else:
out_mask = mask.float()
out_conv = out_conv * out_mask.detach()
out_mask = out_mask.bool()
return out_conv, out_mask
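# Behavioral sketch (illustrative; _masked_conv_check is a hypothetical
# helper): output positions whose mask entry is zero come out exactly zero,
# and the returned mask is cast back to bool.
def _masked_conv_check():
    conv = MaskedConv1D(4, 4, kernel_size=3, padding=1)
    x = torch.randn(1, 4, 8)
    mask = torch.ones(1, 1, 8, dtype=torch.bool)
    mask[..., 6:] = False
    out, out_mask = conv(x, mask)
    assert out[..., 6:].abs().max() == 0 and out_mask.dtype == torch.bool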
class MaskedMHCA(nn.Module):
"""
Multi-Head Conv Attention with mask.
Adds a depthwise convolution within a standard MHA.
The extra conv op can be used to
(1) encode relative position information (replacing position encoding);
(2) downsample the features if needed;
(3) match the feature channels.
Note: with the current implementation, the downsampled feature will be
aligned to every s+1 time steps, where s is the downsampling stride. This
allows us to easily interpolate the corresponding positional embeddings.
Modified from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
def __init__(self, n_embd, n_head, n_qx_stride=1, n_kv_stride=1,
attn_pdrop=0.0, proj_pdrop=0.0):
super().__init__()
assert n_embd % n_head == 0
self.n_embd = n_embd
self.n_head = n_head
self.n_channels = n_embd // n_head
self.scale = 1.0 / math.sqrt(self.n_channels)
assert n_qx_stride == 1 or n_qx_stride % 2 == 0
assert n_kv_stride == 1 or n_kv_stride % 2 == 0
self.n_qx_stride = n_qx_stride
self.n_kv_stride = n_kv_stride
kernel_size = self.n_qx_stride + 1 if self.n_qx_stride > 1 else 3
stride, padding = self.n_kv_stride, kernel_size // 2
self.query_conv = MaskedConv1D(self.n_embd, self.n_embd,
kernel_size, stride=stride, padding=padding, groups=self.n_embd,
bias=False)
self.query_norm = LayerNorm(self.n_embd)
kernel_size = self.n_kv_stride + 1 if self.n_kv_stride > 1 else 3
stride, padding = self.n_kv_stride, kernel_size // 2
self.key_conv = MaskedConv1D(self.n_embd, self.n_embd, kernel_size,
stride=stride, padding=padding, groups=self.n_embd, bias=False)
self.key_norm = LayerNorm(self.n_embd)
self.value_conv = MaskedConv1D(self.n_embd, self.n_embd,
kernel_size, stride=stride, padding=padding, groups=self.n_embd,
bias=False)
self.value_norm = LayerNorm(self.n_embd)
self.key = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.query = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.value = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.attn_drop = nn.Dropout(attn_pdrop)
self.proj_drop = nn.Dropout(proj_pdrop)
self.proj = nn.Conv1d(self.n_embd, self.n_embd, 1)
def forward(self, x, mask):
B, C, _T = x.size()
q, qx_mask = self.query_conv(x, mask)
q = self.query_norm(q)
k, kv_mask = self.key_conv(x, mask)
k = self.key_norm(k)
v, _ = self.value_conv(x, mask)
v = self.value_norm(v)
q = self.query(q)
k = self.key(k)
v = self.value(v)
k = k.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
q = q.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
v = v.view(B, self.n_head, self.n_channels, -1).transpose(2, 3)
att = q * self.scale @ k.transpose(-2, -1)
att = att.masked_fill(torch.logical_not(kv_mask[:, :, None, :]),
float('-inf'))
att = F.softmax(att, dim=-1)
att = self.attn_drop(att)
out = att @ (v * kv_mask[:, :, :, None].float())
out = out.transpose(2, 3).contiguous().view(B, C, -1)
out = self.proj_drop(self.proj(out)) * qx_mask.float()
return out, qx_mask
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_embd': 4, 'n_head': 4}]
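def _masked_mhca_demo():
    # Hedged usage sketch (illustration only; this helper is not part of the
    # original repo). Mirrors get_init_inputs() above, but passes an explicit
    # boolean validity mask of shape (B, 1, T), which broadcasts through the
    # conv, attention masked_fill, and output gating steps.
    mha = MaskedMHCA(n_embd=4, n_head=4)
    x = torch.rand(4, 4, 4)                       # (B, C, T)
    mask = torch.ones(4, 1, 4, dtype=torch.bool)  # all time steps valid
    out, qx_mask = mha(x, mask)
    assert out.shape == x.shape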
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.utils.data
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
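# Fused LayerNorm statistics: for each (batch, time) position, this kernel
# computes the per-channel mean and (biased) variance of the three masked
# depthwise-conv outputs (query, key, value) in a single pass.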
@triton.jit
def triton_poi_fused_mean_mul_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask)
tmp29 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
tmp31 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
tmp34 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
tmp37 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
tmp53 = tl.load(in_ptr3 + (x0 + 16 * x1), xmask)
tmp55 = tl.load(in_ptr3 + (4 + x0 + 16 * x1), xmask)
tmp58 = tl.load(in_ptr3 + (8 + x0 + 16 * x1), xmask)
tmp61 = tl.load(in_ptr3 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tmp30 = tmp29 * tmp1
tmp32 = tmp31 * tmp4
tmp33 = tmp30 + tmp32
tmp35 = tmp34 * tmp8
tmp36 = tmp33 + tmp35
tmp38 = tmp37 * tmp12
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp15
tmp41 = tmp30 - tmp40
tmp42 = tmp41 * tmp41
tmp43 = tmp32 - tmp40
tmp44 = tmp43 * tmp43
tmp45 = tmp42 + tmp44
tmp46 = tmp35 - tmp40
tmp47 = tmp46 * tmp46
tmp48 = tmp45 + tmp47
tmp49 = tmp38 - tmp40
tmp50 = tmp49 * tmp49
tmp51 = tmp48 + tmp50
tmp52 = tmp51 / tmp15
tmp54 = tmp53 * tmp1
tmp56 = tmp55 * tmp4
tmp57 = tmp54 + tmp56
tmp59 = tmp58 * tmp8
tmp60 = tmp57 + tmp59
tmp62 = tmp61 * tmp12
tmp63 = tmp60 + tmp62
tmp64 = tmp63 / tmp15
tmp65 = tmp54 - tmp64
tmp66 = tmp65 * tmp65
tmp67 = tmp56 - tmp64
tmp68 = tmp67 * tmp67
tmp69 = tmp66 + tmp68
tmp70 = tmp59 - tmp64
tmp71 = tmp70 * tmp70
tmp72 = tmp69 + tmp71
tmp73 = tmp62 - tmp64
tmp74 = tmp73 * tmp73
tmp75 = tmp72 + tmp74
tmp76 = tmp75 / tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp28, xmask)
tl.store(out_ptr2 + x2, tmp40, xmask)
tl.store(out_ptr3 + x2, tmp52, xmask)
tl.store(out_ptr4 + x2, tmp64, xmask)
tl.store(out_ptr5 + x2, tmp76, xmask)
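# Applies the LayerNorm normalization (x - mean) / sqrt(var + eps) plus the
# affine weight/bias to q, k and v, and materializes the boolean mask
# (mask != 0) reused later for masked_fill and output gating.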
@triton.jit
def triton_poi_fused__to_copy_add_div_mean_mul_sqrt_sub_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9,
in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x1 = xindex % 4
x3 = xindex // 16
x2 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + (x1 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp6 = tl.load(in_ptr3 + (x1 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr6 + x0, xmask)
tmp17 = tl.load(in_ptr7 + (x1 + 4 * x3), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr8 + (x1 + 4 * x3), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr9 + x2, xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr10 + x2, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr11 + x0, xmask)
tmp29 = tl.load(in_ptr12 + (x1 + 4 * x3), xmask, eviction_policy=
'evict_last')
tmp31 = tl.load(in_ptr13 + (x1 + 4 * x3), xmask, eviction_policy=
'evict_last')
tmp35 = tl.load(in_ptr14 + x2, xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr15 + x2, xmask, eviction_policy='evict_last')
tmp1 = tmp0 != 0
tmp3 = tmp2 * tmp0
tmp5 = tmp3 - tmp4
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = tmp5 / tmp9
tmp12 = tmp10 * tmp11
tmp14 = tmp12 + tmp13
tmp16 = tmp15 * tmp0
tmp18 = tmp16 - tmp17
tmp20 = tmp19 + tmp7
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tmp18 / tmp21
tmp24 = tmp22 * tmp23
tmp26 = tmp24 + tmp25
tmp28 = tmp27 * tmp0
tmp30 = tmp28 - tmp29
tmp32 = tmp31 + tmp7
tmp33 = libdevice.sqrt(tmp32)
tmp34 = tmp30 / tmp33
tmp36 = tmp34 * tmp35
tmp38 = tmp36 + tmp37
tl.store(out_ptr0 + x0, tmp1, xmask)
tl.store(out_ptr1 + x0, tmp14, xmask)
tl.store(out_ptr2 + x0, tmp26, xmask)
tl.store(out_ptr3 + x0, tmp38, xmask)
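# Adds the query 1x1-conv bias and applies the attention scale
# (1 / sqrt(n_channels); the constant is 1.0 here since n_embd == n_head == 4).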
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
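# Masked-softmax pass 1: positions where the key/value mask is 0 are filled
# with -inf, then the per-row max and sum of exponentials are computed.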
@triton.jit
def triton_poi_fused__softmax_logical_not_masked_fill_4(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = tmp0 != 0
tmp2 = tmp1 == 0
tmp4 = float('-inf')
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp6 != 0
tmp8 = tmp7 == 0
tmp10 = tl.where(tmp8, tmp4, tmp9)
tmp11 = triton_helpers.maximum(tmp5, tmp10)
tmp13 = tmp12 != 0
tmp14 = tmp13 == 0
tmp16 = tl.where(tmp14, tmp4, tmp15)
tmp17 = triton_helpers.maximum(tmp11, tmp16)
tmp19 = tmp18 != 0
tmp20 = tmp19 == 0
tmp22 = tl.where(tmp20, tmp4, tmp21)
tmp23 = triton_helpers.maximum(tmp17, tmp22)
tmp24 = tmp5 - tmp23
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp10 - tmp23
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp16 - tmp23
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tmp32 = tmp22 - tmp23
tmp33 = tl_math.exp(tmp32)
tmp34 = tmp31 + tmp33
tl.store(out_ptr0 + x2, tmp23, xmask)
tl.store(out_ptr1 + x2, tmp34, xmask)
@triton.jit
def triton_poi_fused__softmax_logical_not_masked_fill_5(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
x4 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_out_ptr0 + x3, xmask)
tmp6 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp1 = tmp0 != 0
tmp2 = tmp1 == 0
tmp4 = float('-inf')
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + x3, tmp10, xmask)
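# Adds the value 1x1-conv bias and zeroes features at masked-out time steps
# via multiplication with the float-cast mask.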
@triton.jit
def triton_poi_fused__to_copy_mul_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp3 != 0
tmp5 = tmp4.to(tl.float32)
tmp6 = tmp2 * tmp5
tl.store(in_out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused__to_copy_convolution_mul_7(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask).to(tl.int1)
tmp2 = tmp0 + tmp1
tmp4 = tmp3.to(tl.float32)
tmp5 = tmp2 * tmp4
tl.store(in_out_ptr0 + x3, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1, 3), (3, 3, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_6, (4, 1, 3), (3, 3, 1))
assert_size_stride(primals_7, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_8, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_9, (4, 1, 3), (3, 3, 1))
assert_size_stride(primals_10, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_11, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_12, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_19, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf5 = extern_kernels.convolution(primals_1, primals_6, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=4, bias=None)
assert_size_stride(buf5, (4, 4, 4), (16, 4, 1))
buf9 = extern_kernels.convolution(primals_1, primals_9, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=4, bias=None)
assert_size_stride(buf9, (4, 4, 4), (16, 4, 1))
buf2 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf3 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf7 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf10 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf11 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_mul_pow_sub_0[grid(16)](buf0, primals_3, buf5,
buf9, buf2, buf3, buf6, buf7, buf10, buf11, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__to_copy_add_div_mean_mul_sqrt_sub_1[grid(64)](
primals_3, buf0, buf2, buf3, primals_4, primals_5, buf5, buf6,
buf7, primals_7, primals_8, buf9, buf10, buf11, primals_10,
primals_11, buf1, buf4, buf8, buf12, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf10
del buf11
del buf2
del buf3
del buf6
del buf7
del primals_11
del primals_5
del primals_8
buf13 = extern_kernels.convolution(buf4, primals_12, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf13, (4, 4, 4), (16, 4, 1))
buf14 = extern_kernels.convolution(buf8, primals_14, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf14, (4, 4, 4), (16, 4, 1))
buf15 = extern_kernels.convolution(buf12, primals_16, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf15, (4, 4, 4), (16, 4, 1))
buf16 = reinterpret_tensor(buf13, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf13
triton_poi_fused_mul_2[grid(64)](buf16, primals_13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_13
buf17 = buf14
del buf14
triton_poi_fused_convolution_3[grid(64)](buf17, primals_15, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_15
buf18 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf16, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf17, (16, 1, 4), (4, 0, 1), 0), out=buf18)
buf19 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf20 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_logical_not_masked_fill_4[grid(64)](primals_3
, buf18, buf19, buf20, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf21 = reinterpret_tensor(buf18, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf18
triton_poi_fused__softmax_logical_not_masked_fill_5[grid(256)](buf21,
primals_3, buf19, buf20, 256, XBLOCK=128, num_warps=4, num_stages=1
)
del buf19
buf22 = reinterpret_tensor(buf15, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf15
triton_poi_fused__to_copy_mul_6[grid(64)](buf22, primals_17,
primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_17
buf23 = reinterpret_tensor(buf20, (16, 4, 1), (4, 1, 1), 0)
del buf20
extern_kernels.bmm(reinterpret_tensor(buf21, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf22, (16, 4, 1), (4, 1, 0), 0), out=buf23)
buf24 = extern_kernels.convolution(reinterpret_tensor(buf23, (4, 4,
4), (16, 4, 1), 0), primals_18, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf24, (4, 4, 4), (16, 4, 1))
buf25 = buf24
del buf24
triton_poi_fused__to_copy_convolution_mul_7[grid(64)](buf25,
primals_19, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_19
return (buf25, buf1, primals_1, primals_2, primals_3, primals_4,
primals_6, primals_7, primals_9, primals_10, primals_12, primals_14,
primals_16, primals_18, buf0, buf1, buf4, buf5, buf8, buf9, buf12,
buf21, reinterpret_tensor(buf23, (4, 4, 4), (16, 4, 1), 0),
reinterpret_tensor(buf22, (16, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf16, (16, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf17, (16, 4, 1), (4, 1, 4), 0))
class LayerNorm(nn.Module):
"""
LayerNorm that supports inputs of size B, C, T
"""
def __init__(self, num_channels, eps=1e-05, affine=True, device=None,
dtype=None):
super().__init__()
factory_kwargs = {'device': device, 'dtype': dtype}
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = nn.Parameter(torch.ones([1, num_channels, 1], **
factory_kwargs))
self.bias = nn.Parameter(torch.zeros([1, num_channels, 1], **
factory_kwargs))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
def forward(self, x):
assert x.dim() == 3
assert x.shape[1] == self.num_channels
mu = torch.mean(x, dim=1, keepdim=True)
res_x = x - mu
sigma = torch.mean(res_x ** 2, dim=1, keepdim=True)
out = res_x / torch.sqrt(sigma + self.eps)
if self.affine:
out *= self.weight
out += self.bias
return out
class MaskedConv1D(nn.Module):
"""
    Masked 1D convolution. The interface remains the same as Conv1d.
    Only a subset of 1D convolutions is supported.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'):
super().__init__()
assert kernel_size % 2 == 1 and kernel_size // 2 == padding
self.stride = stride
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias, padding_mode)
if bias:
torch.nn.init.constant_(self.conv.bias, 0.0)
def forward(self, x, mask):
_B, _C, T = x.size()
assert T % self.stride == 0
out_conv = self.conv(x)
if self.stride > 1:
out_mask = F.interpolate(mask.float(), size=T // self.stride,
mode='nearest')
else:
out_mask = mask.float()
out_conv = out_conv * out_mask.detach()
out_mask = out_mask.bool()
return out_conv, out_mask
class MaskedMHCANew(nn.Module):
"""
Multi Head Conv Attention with mask
Add a depthwise convolution within a standard MHA
The extra conv op can be used to
    (1) encode relative position information (replacing position encoding);
(2) downsample the features if needed;
(3) match the feature channels
Note: With current implementation, the downsampled feature will be aligned
to every s+1 time step, where s is the downsampling stride. This allows us
to easily interpolate the corresponding positional embeddings.
Modified from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
def __init__(self, n_embd, n_head, n_qx_stride=1, n_kv_stride=1,
attn_pdrop=0.0, proj_pdrop=0.0):
super().__init__()
assert n_embd % n_head == 0
self.n_embd = n_embd
self.n_head = n_head
self.n_channels = n_embd // n_head
self.scale = 1.0 / math.sqrt(self.n_channels)
assert n_qx_stride == 1 or n_qx_stride % 2 == 0
assert n_kv_stride == 1 or n_kv_stride % 2 == 0
self.n_qx_stride = n_qx_stride
self.n_kv_stride = n_kv_stride
kernel_size = self.n_qx_stride + 1 if self.n_qx_stride > 1 else 3
stride, padding = self.n_kv_stride, kernel_size // 2
self.query_conv = MaskedConv1D(self.n_embd, self.n_embd,
kernel_size, stride=stride, padding=padding, groups=self.n_embd,
bias=False)
self.query_norm = LayerNorm(self.n_embd)
kernel_size = self.n_kv_stride + 1 if self.n_kv_stride > 1 else 3
stride, padding = self.n_kv_stride, kernel_size // 2
self.key_conv = MaskedConv1D(self.n_embd, self.n_embd, kernel_size,
stride=stride, padding=padding, groups=self.n_embd, bias=False)
self.key_norm = LayerNorm(self.n_embd)
self.value_conv = MaskedConv1D(self.n_embd, self.n_embd,
kernel_size, stride=stride, padding=padding, groups=self.n_embd,
bias=False)
self.value_norm = LayerNorm(self.n_embd)
self.key = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.query = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.value = nn.Conv1d(self.n_embd, self.n_embd, 1)
self.attn_drop = nn.Dropout(attn_pdrop)
self.proj_drop = nn.Dropout(proj_pdrop)
self.proj = nn.Conv1d(self.n_embd, self.n_embd, 1)
def forward(self, input_0, input_1):
primals_2 = self.query_conv.conv.weight
primals_4 = self.query_norm.weight
primals_5 = self.query_norm.bias
primals_6 = self.key_conv.conv.weight
primals_7 = self.key_norm.weight
primals_8 = self.key_norm.bias
primals_9 = self.value_conv.conv.weight
primals_10 = self.value_norm.weight
primals_11 = self.value_norm.bias
primals_12 = self.key.weight
primals_13 = self.key.bias
primals_14 = self.query.weight
primals_15 = self.query.bias
primals_16 = self.value.weight
primals_17 = self.value.bias
primals_18 = self.proj.weight
primals_19 = self.proj.bias
primals_1 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19])
return output[0], output[1]
| yjh0410/actionformer_release | MaskedMHCA | false | 16,801 | ["MIT"] | 61 | 7a97422111d3e29c8d2e14088c850c6975855ea7 | https://github.com/yjh0410/actionformer_release/tree/7a97422111d3e29c8d2e14088c850c6975855ea7 |
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/vx/cvxzmthv4i2niuhjkx7pdwegys74ubmwp36fuzpk743r7lkqg4tm.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten.norm, aten.div, aten.mul]
# Source node to ATen node mapping:
# _weight_norm => div, mul, pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %pow_2), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
triton_per_fused_div_mul_norm_0 = async_compile.triton('triton_per_fused_div_mul_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp6 = tl.load(in_ptr1 + (0))
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tmp5 = libdevice.sqrt(tmp4)
tmp8 = tmp7 / tmp5
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None)
tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp9, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/uv/cuvoclg352i5kswqhjwhhlw2wpo4y2a4drvsppwkgh6ryoueqbfu.py
# Topologically Sorted Source Nodes: [x_1, x_4], Original ATen: [aten.relu, aten.mul]
# Source node to ATen node mapping:
# x_1 => relu
# x_4 => mul_2
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %expand), kwargs = {})
triton_poi_fused_mul_relu_1 = async_compile.triton('triton_poi_fused_mul_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 * tmp4
tl.store(out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wo/cwo7i4dcowaxozwnj57wqq4ba45uo7pev3igxxitkuqs52wnxctl.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# x_6 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vw/cvwwlf5hh74femiy52pxilo5w77x22ndrh7cd3nkkzhhazqhimhy.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# x_6 => div_3, sum_4
# Graph fragment:
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_4), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (), ())
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (), ())
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten.norm, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_div_mul_norm_0.run(buf1, primals_2, primals_1, buf2, 1, 16, grid=grid(1), stream=stream0)
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_3
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4; del buf4 # reuse
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten.norm, aten.div, aten.mul]
triton_per_fused_div_mul_norm_0.run(buf5, primals_6, primals_5, buf6, 1, 16, grid=grid(1), stream=stream0)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, primals_8, reinterpret_tensor(buf6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7)
del primals_7
buf8 = empty_strided_cuda((), (), torch.float32)
buf9 = buf8; del buf8 # reuse
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_2], Original ATen: [aten.norm, aten.div, aten.mul]
triton_per_fused_div_mul_norm_0.run(buf9, primals_10, primals_9, buf10, 1, 16, grid=grid(1), stream=stream0)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_4], Original ATen: [aten.relu, aten.mul]
triton_poi_fused_mul_relu_1.run(buf3, buf7, buf11, 64, grid=grid(64), stream=stream0)
buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(buf10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12)
del primals_11
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf12, buf13, 64, grid=grid(64), stream=stream0)
buf14 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf13, buf14, 64, grid=grid(64), stream=stream0)
del buf13
return (buf14, buf2, buf6, buf10, primals_1, primals_2, primals_5, primals_6, primals_8, primals_9, primals_10, buf1, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), buf3, buf5, buf7, buf9, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf14, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import weight_norm
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = weight_norm(nn.Linear(in_size, out_size), dim=None)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
        if self.activate == 'relu':  # compare the lowercased name
            self.ac_fn = nn.ReLU()
        elif self.activate == 'sigmoid':
            self.ac_fn = nn.Sigmoid()
        elif self.activate == 'tanh':
            self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class Attention(nn.Module):
def __init__(self, v_features, q_features, mid_features, glimpses, drop=0.0
):
super(Attention, self).__init__()
self.lin_v = FCNet(v_features, mid_features, activate='relu')
self.lin_q = FCNet(q_features, mid_features, activate='relu')
self.lin = FCNet(mid_features, glimpses, drop=drop)
def forward(self, v, q):
"""
v = batch, num_obj, dim
q = batch, dim
"""
v = self.lin_v(v)
q = self.lin_q(q)
batch, num_obj, _ = v.shape
_, q_dim = q.shape
q = q.unsqueeze(1).expand(batch, num_obj, q_dim)
x = v * q
x = self.lin(x)
x = F.softmax(x, dim=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'v_features': 4, 'q_features': 4, 'mid_features': 4,
'glimpses': 4}]
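def _attention_demo():
    # Hedged usage sketch (illustration only; this helper is not part of the
    # original repo). Mirrors get_init_inputs()/get_inputs() above: the module
    # returns attention weights over num_obj, normalized along dim=1.
    att = Attention(v_features=4, q_features=4, mid_features=4, glimpses=4)
    v = torch.rand(4, 4, 4)   # batch x num_obj x dim
    q = torch.rand(4, 4)      # batch x dim
    w = att(v, q)             # batch x num_obj x glimpses
    assert w.shape == (4, 4, 4)
    assert torch.allclose(w.sum(dim=1), torch.ones(4, 4))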
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.utils.data
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
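# Weight-norm reconstruction for weight_norm(..., dim=None): computes the
# scalar L2 norm of weight_v and scales by weight_g / ||weight_v|| to recover
# the effective weight matrix.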
@triton.jit
def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp6 = tl.load(in_ptr1 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tmp5 = libdevice.sqrt(tmp4)
tmp8 = tmp7 / tmp5
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None)
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None)
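# Fuses the two ReLU activations with the elementwise product v * q, where q
# (batch x dim) is broadcast across the num_obj axis of v.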
@triton.jit
def triton_poi_fused_mul_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 * tmp4
tl.store(out_ptr0 + x3, tmp5, xmask)
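# Softmax over dim=1 (num_obj), pass 1: subtract the per-column max and
# exponentiate; pass 2 below divides by the sum of exponentials.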
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (), ())
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (), ())
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_div_mul_norm_0[grid(1)](buf1, primals_2, primals_1,
buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (16,
4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_3
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_div_mul_norm_0[grid(1)](buf5, primals_6, primals_5,
buf6, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, primals_8, reinterpret_tensor(buf6,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7)
del primals_7
buf8 = empty_strided_cuda((), (), torch.float32)
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_div_mul_norm_0[grid(1)](buf9, primals_10,
primals_9, buf10, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_relu_1[grid(64)](buf3, buf7, buf11, 64, XBLOCK
=64, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (16, 4),
(4, 1), 0), reinterpret_tensor(buf10, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf12)
del primals_11
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(64)](buf12, buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf14 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0)
del buf12
triton_poi_fused__softmax_3[grid(64)](buf13, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf13
return (buf14, buf2, buf6, buf10, primals_1, primals_2, primals_5,
primals_6, primals_8, primals_9, primals_10, buf1,
reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), buf3, buf5, buf7,
buf9, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf14, buf10)
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = weight_norm(nn.Linear(in_size, out_size), dim=None)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
        if self.activate == 'relu':  # compare the lowercased name
            self.ac_fn = nn.ReLU()
        elif self.activate == 'sigmoid':
            self.ac_fn = nn.Sigmoid()
        elif self.activate == 'tanh':
            self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
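# Note on the parameters extracted below: weight_norm(nn.Linear(...), dim=None)
# stores a scalar gain `weight_g` and a direction tensor `weight_v`; the
# effective weight is weight_v * (weight_g / ||weight_v||_2), which is exactly
# what triton_per_fused_div_mul_norm_0 above recomputes for each layer.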
class AttentionNew(nn.Module):
def __init__(self, v_features, q_features, mid_features, glimpses, drop=0.0
):
super(AttentionNew, self).__init__()
self.lin_v = FCNet(v_features, mid_features, activate='relu')
self.lin_q = FCNet(q_features, mid_features, activate='relu')
self.lin = FCNet(mid_features, glimpses, drop=drop)
def forward(self, input_0, input_1):
primals_3 = self.lin_v.lin.bias
primals_1 = self.lin_v.lin.weight_g
primals_2 = self.lin_v.lin.weight_v
primals_7 = self.lin_q.lin.bias
primals_5 = self.lin_q.lin.weight_g
primals_6 = self.lin_q.lin.weight_v
primals_11 = self.lin.lin.bias
primals_9 = self.lin.lin.weight_g
primals_8 = self.lin.lin.weight_v
primals_4 = input_0
primals_10 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| zaynmi/semantic-equivalent-da-for-vqa | Attention | false | 16,802 | ["MIT"] | 298 | f121fb3e8fee8af5f1935a7526f19e0d884bd95b | https://github.com/zaynmi/semantic-equivalent-da-for-vqa/tree/f121fb3e8fee8af5f1935a7526f19e0d884bd95b |
EltwiseProdScoring | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/oe/coethfes7r2w6tzgot3xnemdzapqkiikobh7ozi3ggenkf6qr6g2.py
# Topologically Sorted Source Nodes: [eltprod], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# eltprod => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %view_3), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4096
x2 = (xindex // 16384)
x3 = xindex % 16384
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4096*x2)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x4), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 4), (4, 1))
assert_size_stride(primals_5, (256, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 256), (256, 1))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [context], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 256), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4, 256), (16384, 4096, 1024, 256, 1), torch.float32)
# Topologically Sorted Source Nodes: [eltprod], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf0, buf1, buf2, 65536, grid=grid(65536), stream=stream0)
buf4 = empty_strided_cuda((256, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 256), (256, 1), 0), reinterpret_tensor(primals_7, (256, 1), (1, 256), 0), alpha=1, beta=1, out=buf4)
del primals_8
return (reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (256, 256), (256, 1), 0), primals_7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class EltwiseProdScoring(nn.Module):
"""
    Linearly maps h and v to the same dimension, then applies an elementwise
    multiplication followed by a linear scoring
"""
def __init__(self, h_dim, a_dim, dot_dim=256):
"""Initialize layer."""
super(EltwiseProdScoring, self).__init__()
self.linear_in_h = nn.Linear(h_dim, dot_dim, bias=True)
self.linear_in_a = nn.Linear(a_dim, dot_dim, bias=True)
self.linear_out = nn.Linear(dot_dim, 1, bias=True)
def forward(self, h, all_u_t, mask=None):
"""Propagate h through the network.
h: batch x h_dim
all_u_t: batch x a_num x a_dim
"""
target = self.linear_in_h(h).unsqueeze(1)
context = self.linear_in_a(all_u_t)
eltprod = torch.mul(target, context)
logits = self.linear_out(eltprod).squeeze(2)
return logits
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'h_dim': 4, 'a_dim': 4}]
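# A minimal eager-mode usage sketch, assuming the docstring shapes
# (h: batch x h_dim, all_u_t: batch x a_num x a_dim); the concrete batch
# and a_num sizes below are illustrative assumptions.
def example_eltwise_prod_scoring():
    scorer = EltwiseProdScoring(h_dim=4, a_dim=4)
    h = torch.rand(2, 4)           # batch=2, h_dim=4
    all_u_t = torch.rand(2, 3, 4)  # batch=2, a_num=3, a_dim=4
    logits = scorer(h, all_u_t)    # elementwise product in dot_dim, then score
    assert logits.shape == (2, 3)  # one logit per candidate action
    return logits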
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4096
x2 = xindex // 16384
x3 = xindex % 16384
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4096 * x2), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + x3, None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x4, tmp2, None)
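# Plain-PyTorch restatement of the fused multiply above, as a reading aid
# (a sketch of the index arithmetic, not a CUDA-exact test): the kernel
# computes out[i] = in0[i % 4096 + 4096 * (i // 16384)] * in1[i % 16384].
def mul_kernel_reference(buf0_flat, buf1_flat):
    idx = torch.arange(65536)
    x0 = idx % 4096    # offset inside a 4096-element slab of buf0
    x2 = idx // 16384  # selects which slab of buf0 is broadcast
    x3 = idx % 16384   # direct index into buf1
    return buf0_flat[x0 + 4096 * x2] * buf1_flat[x3]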
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 4), (4, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 256), (256, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 256), (1, 4),
0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4, 256), (16384, 4096, 1024,
256, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(65536)](buf0, buf1, buf2, 65536, XBLOCK
=256, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((256, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 256),
(256, 1), 0), reinterpret_tensor(primals_7, (256, 1), (1, 256),
0), alpha=1, beta=1, out=buf4)
del primals_8
return reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (256, 256), (256, 1), 0), primals_7
class EltwiseProdScoringNew(nn.Module):
"""
    Linearly map h and v to the same dimension, apply an elementwise
    multiplication, and score the result with a linear layer.
"""
def __init__(self, h_dim, a_dim, dot_dim=256):
"""Initialize layer."""
super(EltwiseProdScoringNew, self).__init__()
self.linear_in_h = nn.Linear(h_dim, dot_dim, bias=True)
self.linear_in_a = nn.Linear(a_dim, dot_dim, bias=True)
self.linear_out = nn.Linear(dot_dim, 1, bias=True)
def forward(self, input_0, input_1):
primals_1 = self.linear_in_h.weight
primals_2 = self.linear_in_h.bias
primals_4 = self.linear_in_a.weight
primals_5 = self.linear_in_a.bias
primals_7 = self.linear_out.weight
primals_8 = self.linear_out.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
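# An invocation sketch for the compiled wrapper, assuming a CUDA device is
# available (call() allocates CUDA buffers unconditionally); input shapes
# follow get_inputs() of the reference module above.
def example_eltwise_prod_scoring_compiled():
    if not torch.cuda.is_available():
        return None
    model = EltwiseProdScoringNew(h_dim=4, a_dim=4).cuda()
    h = torch.rand(4, 4, 4, 4, device='cuda')
    all_u_t = torch.rand(4, 4, 4, 4, device='cuda')
    return model(h, all_u_t)  # shape (4, 4, 4, 4, 1), per the return of call()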
| zhangybzbo/speaker_follower | EltwiseProdScoring | false | 16803 | [
"BSD-2-Clause",
"MIT"
] | 117 | e4d109ee26b2f57066adc9720443abf842ee9a9d | https://github.com/zhangybzbo/speaker_follower/tree/e4d109ee26b2f57066adc9720443abf842ee9a9d |
RelativePositionalEmbedding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/gq/cgqd7fktloq7lv6oi5n36kfikqz3kpbs27ktt72i65ov5i3cdgrp.py
# Topologically Sorted Source Nodes: [new_tensor, pos, sub, add], Original ATen: [aten.lift_fresh, aten._to_copy, aten.sub, aten.add]
# Source node to ATen node mapping:
# add => add
# new_tensor => lift_fresh_copy
# pos => convert_element_type
# sub => sub
# Graph fragment:
# %lift_fresh_copy : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%_tensor_constant0,), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lift_fresh_copy, torch.int64), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convert_element_type, %unsqueeze), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 512), kwargs = {})
triton_poi_fused__to_copy_add_lift_fresh_sub_0 = async_compile.triton('triton_poi_fused__to_copy_add_lift_fresh_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_lift_fresh_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_lift_fresh_sub_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = 1.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tl.full([1], 3, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = 2.0
tmp11 = 3.0
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp7, tmp12)
tmp14 = tmp13.to(tl.int32)
tmp15 = x1
tmp16 = tmp15 < tmp1
tmp17 = tmp15 < tmp3
tmp18 = tl.where(tmp17, tmp5, tmp6)
tmp19 = tmp15 < tmp8
tmp20 = tl.where(tmp19, tmp10, tmp11)
tmp21 = tl.where(tmp16, tmp18, tmp20)
tmp22 = tmp21.to(tl.int32)
tmp23 = tmp14 - tmp22
tmp24 = tl.full([1], 512, tl.int64)
tmp25 = tmp23 + tmp24
tl.store(out_ptr0 + (x2), tmp25, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/36/c365htijy33po4c6ywgzx6c4qor4y6w2axufpmidriypi64t7y6f.py
# Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding]
# Source node to ATen node mapping:
# embedding => embedding
# Graph fragment:
# %embedding : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_2, %add), kwargs = {})
triton_poi_fused_embedding_1 = async_compile.triton('triton_poi_fused_embedding_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_embedding_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_embedding_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x0 = xindex % 4
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = 1.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tl.full([1], 3, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = 2.0
tmp11 = 3.0
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp7, tmp12)
tmp14 = tmp13.to(tl.int32)
tmp15 = x2
tmp16 = tmp15 < tmp1
tmp17 = tmp15 < tmp3
tmp18 = tl.where(tmp17, tmp5, tmp6)
tmp19 = tmp15 < tmp8
tmp20 = tl.where(tmp19, tmp10, tmp11)
tmp21 = tl.where(tmp16, tmp18, tmp20)
tmp22 = tmp21.to(tl.int32)
tmp23 = tmp14 - tmp22
tmp24 = tl.full([1], 512, tl.int64)
tmp25 = tmp23 + tmp24
tmp26 = tl.load(in_ptr0 + (x0 + (4*tmp25)), xmask)
tl.store(out_ptr0 + (x4), tmp26, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1024, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
# Topologically Sorted Source Nodes: [new_tensor, pos, sub, add], Original ATen: [aten.lift_fresh, aten._to_copy, aten.sub, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_add_lift_fresh_sub_0.run(buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding]
triton_poi_fused_embedding_1.run(primals_2, buf1, 64, grid=grid(64), stream=stream0)
del primals_2
return (buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1024, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class RelativePositionalEmbedding(nn.Module):
def __init__(self, n_model, max_len=1024):
super().__init__()
self.embed = nn.Embedding(max_len, n_model)
self.reset_parameters()
@torch.no_grad()
def reset_parameters(self):
w = self.embed.weight
max_len, n_model = w.shape
        pos = torch.cat((w.new_tensor(range(-max_len // 2, 0)),
            w.new_tensor(range(max_len // 2))))
        w = pos.unsqueeze(-1) / 10000 ** (
            w.new_tensor(range(n_model)) // 2 * 2 / n_model)
w[:, 0::2], w[:, 1::2] = w[:, 0::2].sin(), w[:, 1::2].cos()
self.embed.weight.copy_(w)
def forward(self, x):
pos = x.new_tensor(range(x.shape[1])).long()
offset = sum(divmod(self.embed.weight.shape[0], 2))
return self.embed(pos - pos.unsqueeze(-1) + offset)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_model': 4}]
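# A minimal eager-mode sketch, assuming the get_inputs() shape above; only
# x.shape[1] is used in forward, and offset = sum(divmod(1024, 2)) = 512.
def example_relative_positional_embedding():
    emb = RelativePositionalEmbedding(n_model=4)
    out = emb(torch.rand(4, 4, 4, 4))  # embeds relative offsets j - i + 512
    assert out.shape == (4, 4, 4)      # (len, len, n_model)
    return out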
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_add_lift_fresh_sub_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = 1.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tl.full([1], 3, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = 2.0
tmp11 = 3.0
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp7, tmp12)
tmp14 = tmp13.to(tl.int32)
tmp15 = x1
tmp16 = tmp15 < tmp1
tmp17 = tmp15 < tmp3
tmp18 = tl.where(tmp17, tmp5, tmp6)
tmp19 = tmp15 < tmp8
tmp20 = tl.where(tmp19, tmp10, tmp11)
tmp21 = tl.where(tmp16, tmp18, tmp20)
tmp22 = tmp21.to(tl.int32)
tmp23 = tmp14 - tmp22
tmp24 = tl.full([1], 512, tl.int64)
tmp25 = tmp23 + tmp24
tl.store(out_ptr0 + x2, tmp25, xmask)
@triton.jit
def triton_poi_fused_embedding_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x2 = xindex // 16
x0 = xindex % 4
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = 1.0
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tl.full([1], 3, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = 2.0
tmp11 = 3.0
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp7, tmp12)
tmp14 = tmp13.to(tl.int32)
tmp15 = x2
tmp16 = tmp15 < tmp1
tmp17 = tmp15 < tmp3
tmp18 = tl.where(tmp17, tmp5, tmp6)
tmp19 = tmp15 < tmp8
tmp20 = tl.where(tmp19, tmp10, tmp11)
tmp21 = tl.where(tmp16, tmp18, tmp20)
tmp22 = tmp21.to(tl.int32)
tmp23 = tmp14 - tmp22
tmp24 = tl.full([1], 512, tl.int64)
tmp25 = tmp23 + tmp24
tmp26 = tl.load(in_ptr0 + (x0 + 4 * tmp25), xmask)
tl.store(out_ptr0 + x4, tmp26, xmask)
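# The branchy index arithmetic in the two kernels above reconstructs
# arange(4) along each axis and reduces to the eager formula below (a
# reading aid for buf0 / tmp25, not a CUDA-exact test):
def relative_index_reference():
    pos = torch.arange(4)
    return pos - pos.unsqueeze(-1) + 512  # index[row, col] = col - row + 512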
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1024, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused__to_copy_add_lift_fresh_sub_0[grid(16)](buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_embedding_1[grid(64)](primals_2, buf1, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_2
return buf1, buf0
class RelativePositionalEmbeddingNew(nn.Module):
def __init__(self, n_model, max_len=1024):
super().__init__()
self.embed = nn.Embedding(max_len, n_model)
self.reset_parameters()
@torch.no_grad()
def reset_parameters(self):
w = self.embed.weight
max_len, n_model = w.shape
        pos = torch.cat((w.new_tensor(range(-max_len // 2, 0)),
            w.new_tensor(range(max_len // 2))))
        w = pos.unsqueeze(-1) / 10000 ** (
            w.new_tensor(range(n_model)) // 2 * 2 / n_model)
w[:, 0::2], w[:, 1::2] = w[:, 0::2].sin(), w[:, 1::2].cos()
self.embed.weight.copy_(w)
def forward(self, input_0):
primals_2 = self.embed.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
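# An invocation sketch for the compiled wrapper, assuming a CUDA device;
# the (4, 4, 4) output shape matches buf1 in call() above.
def example_relative_positional_embedding_compiled():
    if not torch.cuda.is_available():
        return None
    emb = RelativePositionalEmbeddingNew(n_model=4).cuda()
    return emb(torch.rand(4, 4, 4, 4, device='cuda'))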
| yzhangcs/parser | RelativePositionalEmbedding | false | 16804 | [
"MIT"
] | 439 | 3abebde1c9fe0bf2e99adce845aaf2a04b194f8a | https://github.com/yzhangcs/parser/tree/3abebde1c9fe0bf2e99adce845aaf2a04b194f8a |
Classifier | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/vx/cvxzmthv4i2niuhjkx7pdwegys74ubmwp36fuzpk743r7lkqg4tm.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten.norm, aten.div, aten.mul]
# Source node to ATen node mapping:
# _weight_norm => div, mul, pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %pow_2), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
triton_per_fused_div_mul_norm_0 = async_compile.triton('triton_per_fused_div_mul_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp6 = tl.load(in_ptr1 + (0))
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tmp5 = libdevice.sqrt(tmp4)
tmp8 = tmp7 / tmp5
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None)
tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp9, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5j/c5jhcw3xz45rvmazt6hsxvzwe25bwzg5m4uwsowogmy4kwkpp5tf.py
# Topologically Sorted Source Nodes: [x_1, x_3, x_4], Original ATen: [aten.relu, aten.mul]
# Source node to ATen node mapping:
# x_1 => relu
# x_3 => relu_1
# x_4 => mul_2
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %relu_1), kwargs = {})
triton_poi_fused_mul_relu_1 = async_compile.triton('triton_poi_fused_mul_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 * tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gm/cgmflgdlpeeb52xctoa47uvw47ycyf7ahlj5wdscxdatpbwcboco.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_6 => relu_2
# Graph fragment:
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (), ())
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_9, (), ())
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (), ())
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten.norm, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_div_mul_norm_0.run(buf1, primals_2, primals_1, buf2, 1, 16, grid=grid(1), stream=stream0)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_3
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4; del buf4 # reuse
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten.norm, aten.div, aten.mul]
triton_per_fused_div_mul_norm_0.run(buf5, primals_6, primals_5, buf6, 1, 16, grid=grid(1), stream=stream0)
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_8, (64, 4), (4, 1), 0), reinterpret_tensor(buf6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7)
del primals_7
buf8 = empty_strided_cuda((), (), torch.float32)
buf9 = buf8; del buf8 # reuse
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_2], Original ATen: [aten.norm, aten.div, aten.mul]
triton_per_fused_div_mul_norm_0.run(buf9, primals_10, primals_9, buf10, 1, 16, grid=grid(1), stream=stream0)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_3, x_4], Original ATen: [aten.relu, aten.mul]
triton_poi_fused_mul_relu_1.run(buf3, buf7, buf11, 256, grid=grid(256), stream=stream0)
buf12 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor(buf10, (4, 4), (1, 4), 0), out=buf12)
buf13 = empty_strided_cuda((), (), torch.float32)
buf14 = buf13; del buf13 # reuse
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_3], Original ATen: [aten.norm, aten.div, aten.mul]
triton_per_fused_div_mul_norm_0.run(buf14, primals_13, primals_12, buf15, 1, 16, grid=grid(1), stream=stream0)
buf16 = reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf12 # reuse
buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf16, primals_11, buf18, 256, grid=grid(256), stream=stream0)
del primals_11
buf17 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_14, reinterpret_tensor(buf16, (64, 4), (4, 1), 0), reinterpret_tensor(buf15, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf17)
del primals_14
return (reinterpret_tensor(buf17, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2, buf6, buf10, buf15, primals_1, primals_2, primals_5, primals_6, primals_9, primals_10, primals_12, primals_13, buf1, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf3, buf5, reinterpret_tensor(primals_8, (64, 4), (4, 1), 0), buf7, buf9, reinterpret_tensor(buf11, (64, 4), (4, 1), 0), buf14, reinterpret_tensor(buf16, (64, 4), (4, 1), 0), buf15, buf18, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = weight_norm(nn.Linear(in_size, out_size), dim=None)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
if activate == 'relu':
self.ac_fn = nn.ReLU()
elif activate == 'sigmoid':
self.ac_fn = nn.Sigmoid()
elif activate == 'tanh':
self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class Classifier(nn.Module):
def __init__(self, in_features, mid_features, out_features, drop=0.0):
super(Classifier, self).__init__()
self.lin11 = FCNet(in_features[0], mid_features, activate='relu')
self.lin12 = FCNet(in_features[1], mid_features, activate='relu')
self.lin2 = FCNet(mid_features, mid_features, activate='relu')
self.lin3 = FCNet(mid_features, out_features, drop=drop)
def forward(self, v, q):
x = self.lin11(v) * self.lin12(q)
x = self.lin2(x)
x = self.lin3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': [4, 4], 'mid_features': 4, 'out_features': 4}]
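# A minimal eager-mode sketch using the get_inputs()/get_init_inputs()
# values above: v and q pass through weight-normalized linear+ReLU layers
# and are fused by elementwise product before the last two layers.
def example_classifier():
    clf = Classifier(in_features=[4, 4], mid_features=4, out_features=4)
    v, q = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    out = clf(v, q)
    assert out.shape == (4, 4, 4, 4)
    return out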
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp6 = tl.load(in_ptr1 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tmp5 = libdevice.sqrt(tmp4)
tmp8 = tmp7 / tmp5
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None)
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None)
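# Eager restatement of the weight-norm kernel above (a reading aid, not a
# CUDA-exact test): with weight_norm(..., dim=None), the effective weight
# is v * (g / ||v||_F), the Frobenius norm taken over all 16 elements.
def weight_norm_reference(weight_v, weight_g):
    norm = weight_v.pow(2).sum().sqrt()  # tmp5
    return weight_v * (weight_g / norm)  # tmp9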
@triton.jit
def triton_poi_fused_mul_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 * tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (), ())
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_9, (), ())
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (), ())
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_div_mul_norm_0[grid(1)](buf1, primals_2, primals_1,
buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64,
4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_3
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_div_mul_norm_0[grid(1)](buf5, primals_6, primals_5,
buf6, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_8, (64,
4), (4, 1), 0), reinterpret_tensor(buf6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf7)
del primals_7
buf8 = empty_strided_cuda((), (), torch.float32)
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_div_mul_norm_0[grid(1)](buf9, primals_10,
primals_9, buf10, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_relu_1[grid(256)](buf3, buf7, buf11, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf11, (64, 4), (4, 1), 0),
reinterpret_tensor(buf10, (4, 4), (1, 4), 0), out=buf12)
buf13 = empty_strided_cuda((), (), torch.float32)
buf14 = buf13
del buf13
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_div_mul_norm_0[grid(1)](buf14, primals_13,
primals_12, buf15, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf16 = reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf12
buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(256)](buf16,
primals_11, buf18, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf17 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_14, reinterpret_tensor(buf16, (64, 4),
            (4, 1), 0), reinterpret_tensor(buf15, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf17)
del primals_14
return (reinterpret_tensor(buf17, (4, 4, 4, 4), (64, 16, 4, 1), 0),
buf2, buf6, buf10, buf15, primals_1, primals_2, primals_5,
primals_6, primals_9, primals_10, primals_12, primals_13, buf1,
reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf3, buf5,
reinterpret_tensor(primals_8, (64, 4), (4, 1), 0), buf7, buf9,
reinterpret_tensor(buf11, (64, 4), (4, 1), 0), buf14,
reinterpret_tensor(buf16, (64, 4), (4, 1), 0), buf15, buf18, buf10)
class FCNet(nn.Module):
def __init__(self, in_size, out_size, activate=None, drop=0.0):
super(FCNet, self).__init__()
self.lin = weight_norm(nn.Linear(in_size, out_size), dim=None)
self.drop_value = drop
self.drop = nn.Dropout(drop)
self.activate = activate.lower() if activate is not None else None
if activate == 'relu':
self.ac_fn = nn.ReLU()
elif activate == 'sigmoid':
self.ac_fn = nn.Sigmoid()
elif activate == 'tanh':
self.ac_fn = nn.Tanh()
def forward(self, x):
if self.drop_value > 0:
x = self.drop(x)
x = self.lin(x)
if self.activate is not None:
x = self.ac_fn(x)
return x
class ClassifierNew(nn.Module):
def __init__(self, in_features, mid_features, out_features, drop=0.0):
super(ClassifierNew, self).__init__()
self.lin11 = FCNet(in_features[0], mid_features, activate='relu')
self.lin12 = FCNet(in_features[1], mid_features, activate='relu')
self.lin2 = FCNet(mid_features, mid_features, activate='relu')
self.lin3 = FCNet(mid_features, out_features, drop=drop)
def forward(self, input_0, input_1):
primals_3 = self.lin11.lin.bias
primals_1 = self.lin11.lin.weight_g
primals_2 = self.lin11.lin.weight_v
primals_7 = self.lin12.lin.bias
primals_5 = self.lin12.lin.weight_g
primals_6 = self.lin12.lin.weight_v
primals_11 = self.lin2.lin.bias
primals_9 = self.lin2.lin.weight_g
primals_10 = self.lin2.lin.weight_v
primals_14 = self.lin3.lin.bias
primals_12 = self.lin3.lin.weight_g
primals_13 = self.lin3.lin.weight_v
primals_4 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0]
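# An invocation sketch for the compiled wrapper, assuming a CUDA device;
# shapes follow get_inputs() above and the (4, 4, 4, 4) output of call().
def example_classifier_compiled():
    if not torch.cuda.is_available():
        return None
    clf = ClassifierNew(in_features=[4, 4], mid_features=4,
        out_features=4).cuda()
    return clf(torch.rand(4, 4, 4, 4, device='cuda'),
        torch.rand(4, 4, 4, 4, device='cuda'))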
| zaynmi/semantic-equivalent-da-for-vqa | Classifier | false | 16805 | [
"MIT"
] | 298 | f121fb3e8fee8af5f1935a7526f19e0d884bd95b | https://github.com/zaynmi/semantic-equivalent-da-for-vqa/tree/f121fb3e8fee8af5f1935a7526f19e0d884bd95b |
GumbelSigmoid | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/m5/cm5yopawo2x6h3gb6d35oi67f7sissigvhxgd74rfzc6ld37cb4o.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_3 => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%div, %div_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = 1e-08
tmp7 = tmp5 + tmp6
tmp8 = tl_math.log(tmp7)
tmp9 = tl.load(in_ptr1 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp10 = tmp9 + tmp6
tmp11 = tl_math.log(tmp10)
tmp12 = -1.0
tmp13 = tmp11 * tmp12
tmp14 = tmp13 + tmp6
tmp15 = tl_math.log(tmp14)
tmp16 = tmp15 * tmp12
tmp17 = tmp8 + tmp16
tmp18 = 33.333322222225924
tmp19 = tmp17 * tmp18
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp4, tmp19, tmp20)
tmp22 = tmp0 >= tmp3
tmp23 = tl.full([1], 8, tl.int64)
tmp24 = tmp0 < tmp23
tmp25 = tl.load(in_ptr0 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp22 & xmask, other=0.0)
tmp26 = 1.0
tmp27 = tmp26 - tmp25
tmp28 = tmp27 + tmp6
tmp29 = tl_math.log(tmp28)
tmp30 = tl.load(in_ptr2 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp22 & xmask, other=0.0)
tmp31 = tmp30 + tmp6
tmp32 = tl_math.log(tmp31)
tmp33 = tmp32 * tmp12
tmp34 = tmp33 + tmp6
tmp35 = tl_math.log(tmp34)
tmp36 = tmp35 * tmp12
tmp37 = tmp29 + tmp36
tmp38 = tmp37 * tmp18
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp22, tmp38, tmp39)
tmp41 = tl.where(tmp4, tmp21, tmp40)
tl.store(out_ptr0 + (x3), tmp41, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2y/c2yi65m3rujd6ptf72lfspr2o76uja7a2y3f2o7ssqstzzuf5kas.py
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten._softmax, aten.index]
# Source node to ATen node mapping:
# x_4 => amax, div_2, exp, sub_1, sum_1
# x_5 => index
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%cat, [1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%cat, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%div_2, [None, %full_default]), kwargs = {})
triton_per_fused__softmax_index_1 = async_compile.triton('triton_per_fused__softmax_index_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 8],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_index_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_index_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 8
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (128*x1)), xmask, other=0.0)
tmp11 = tl.load(in_ptr0 + (x0 + (128*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp12 = tmp11 - tmp4
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp13 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + (x3), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x_N], Original ATen: [aten.rand_like]
buf0 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf1 = buf0
del buf0
# Topologically Sorted Source Nodes: [r_N], Original ATen: [aten.rand_like]
buf2 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, buf1, buf3, buf4, 512, grid=grid(512), stream=stream0)
del arg0_1
del buf1
del buf3
buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf7 = reinterpret_tensor(buf6, (4, 1, 4, 4), (16, 16, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten._softmax, aten.index]
triton_per_fused__softmax_index_1.run(buf7, buf4, 64, 8, grid=grid(64), stream=stream0)
del buf4
return (buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class GumbelSigmoid(nn.Module):
def __init__(self, max_T, decay_alpha):
super(GumbelSigmoid, self).__init__()
self.max_T = max_T
self.decay_alpha = decay_alpha
self.softmax = nn.Softmax(dim=1)
self.p_value = 1e-08
self.register_buffer('cur_T', torch.tensor(max_T))
def forward(self, x):
if self.training:
_cur_T = self.cur_T
else:
_cur_T = 0.03
r = 1 - x
x = (x + self.p_value).log()
r = (r + self.p_value).log()
x_N = torch.rand_like(x)
r_N = torch.rand_like(r)
x_N = -1 * (x_N + self.p_value).log()
r_N = -1 * (r_N + self.p_value).log()
x_N = -1 * (x_N + self.p_value).log()
r_N = -1 * (r_N + self.p_value).log()
x = x + x_N
x = x / (_cur_T + self.p_value)
r = r + r_N
r = r / (_cur_T + self.p_value)
x = torch.cat((x, r), dim=1)
x = self.softmax(x)
x = x[:, [0], :, :]
if self.training:
self.cur_T = self.cur_T * self.decay_alpha
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'max_T': 4, 'decay_alpha': 4}]
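# A minimal eager-mode sketch; in eval() mode the module uses the fixed
# temperature 0.03 instead of the decaying cur_T buffer, and keeps only
# channel 0 of the softmaxed (x, 1 - x) pair.
def example_gumbel_sigmoid():
    gate = GumbelSigmoid(max_T=4, decay_alpha=4).eval()
    y = gate(torch.rand(4, 4, 4, 4))
    assert y.shape == (4, 1, 4, 4)  # one soft gate per spatial position
    return y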
| import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = 1e-08
tmp7 = tmp5 + tmp6
tmp8 = tl_math.log(tmp7)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp10 = tmp9 + tmp6
tmp11 = tl_math.log(tmp10)
tmp12 = -1.0
tmp13 = tmp11 * tmp12
tmp14 = tmp13 + tmp6
tmp15 = tl_math.log(tmp14)
tmp16 = tmp15 * tmp12
tmp17 = tmp8 + tmp16
tmp18 = 33.333322222225924
tmp19 = tmp17 * tmp18
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp4, tmp19, tmp20)
tmp22 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp25 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp22 &
xmask, other=0.0)
tmp26 = 1.0
tmp27 = tmp26 - tmp25
tmp28 = tmp27 + tmp6
tmp29 = tl_math.log(tmp28)
tmp30 = tl.load(in_ptr2 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp22 &
xmask, other=0.0)
tmp31 = tmp30 + tmp6
tmp32 = tl_math.log(tmp31)
tmp33 = tmp32 * tmp12
tmp34 = tmp33 + tmp6
tmp35 = tl_math.log(tmp34)
tmp36 = tmp35 * tmp12
tmp37 = tmp29 + tmp36
tmp38 = tmp37 * tmp18
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp22, tmp38, tmp39)
tmp41 = tl.where(tmp4, tmp21, tmp40)
tl.store(out_ptr0 + x3, tmp41, xmask)
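# The literal tmp18 baked into the kernel above is the eval-mode scale of
# the GumbelSigmoid module defined earlier in this record:
def gumbel_eval_scale():
    eval_T, p_value = 0.03, 1e-08    # _cur_T in eval mode, self.p_value
    return 1.0 / (eval_T + p_value)  # == tmp18 (33.333322222225924)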
@triton.jit
def triton_per_fused__softmax_index_1(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 128 * x1), xmask, other=0.0)
tmp11 = tl.load(in_ptr0 + (x0 + 128 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp12 = tmp11 - tmp4
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp13 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp14, xmask)
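# Annotation (reading of the generated code above, not part of it):
# triton_poi_fused_cat_0 builds the concatenated logits (log(p) + g) / T and
# (log(1 - p) + g') / T with fresh Gumbel noise g, g'; the constant
# 33.333322222225924 is 1 / (0.03 + 1e-08), i.e. the eval-time temperature
# baked into the trace. triton_per_fused__softmax_index_1 then softmaxes over
# the 8 concatenated channels and keeps channel 0. call() below samples the
# uniform noise with aten.rand and launches both kernels.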
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.
float32, device=device(type='cuda', index=0), pin_memory=False)
buf1 = buf0
del buf0
buf2 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.
float32, device=device(type='cuda', index=0), pin_memory=False)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](arg0_1, buf1, buf3, buf4, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf1
del buf3
buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf7 = reinterpret_tensor(buf6, (4, 1, 4, 4), (16, 16, 4, 1), 0)
del buf6
triton_per_fused__softmax_index_1[grid(64)](buf7, buf4, 64, 8,
XBLOCK=1, num_warps=2, num_stages=1)
del buf4
return buf7,
class GumbelSigmoidNew(nn.Module):
def __init__(self, max_T, decay_alpha):
super(GumbelSigmoidNew, self).__init__()
self.max_T = max_T
self.decay_alpha = decay_alpha
self.softmax = nn.Softmax(dim=1)
self.p_value = 1e-08
self.register_buffer('cur_T', torch.tensor(max_T))
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| zdaxie/SpatiallyAdaptiveInference-Detection | GumbelSigmoid | false | 16,806 | [
"Apache-2.0"
] | 55 | 323801deac6f0641d00ecb23f6885df8483cc447 | https://github.com/zdaxie/SpatiallyAdaptiveInference-Detection/tree/323801deac6f0641d00ecb23f6885df8483cc447 |
AdaptiveInstanceNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3g/c3gbtbflqsjqgsddw3vxujyeiz7n7yqa2yck5o4n6wm34w67wrid.py
# Topologically Sorted Source Nodes: [out, mul, out_1], Original ATen: [aten._native_batch_norm_legit, aten.mul, aten.add]
# Source node to ATen node mapping:
# mul => mul_1
# out => add, rsqrt, var_mean
# out_1 => add_1
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %view_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %getitem_1), kwargs = {})
triton_per_fused__native_batch_norm_legit_add_mul_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + (x2 + (8*x3)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (4 + x2 + (8*x3)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp24 = tmp22 + tmp23
tmp25 = tmp0 - tmp10
tmp26 = tmp25 * tmp21
tmp27 = tmp24 * tmp26
tmp30 = tmp28 + tmp29
tmp31 = tmp27 + tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(out_ptr1 + (r1 + (16*x0)), tmp31, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf4 = reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out, mul, out_1], Original ATen: [aten._native_batch_norm_legit, aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_add_mul_0.run(buf4, primals_4, buf0, primals_2, buf1, buf5, 16, 16, grid=grid(16), stream=stream0)
del buf0
del primals_2
return (buf5, primals_3, primals_4, buf1, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch
import torch.nn as nn
import torch.sparse
class AdaptiveInstanceNorm(nn.Module):
def __init__(self, in_channel, style_dim):
super().__init__()
self.norm = nn.InstanceNorm2d(in_channel)
self.linear = nn.Linear(style_dim, in_channel * 2)
self.linear.weight.data.normal_()
self.linear.bias.data.zero_()
self.linear.bias.data[:in_channel] = 1
self.linear.bias.data[in_channel:] = 0
def forward(self, input, style):
style = self.linear(style).unsqueeze(2).unsqueeze(3)
gamma, beta = style.chunk(2, 1)
out = self.norm(input)
out = gamma * out + beta
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'style_dim': 4}]
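# --- usage sketch (illustrative; a real style network producing `style` is
# assumed) ---
# AdaptiveInstanceNorm maps a style vector to per-channel (gamma, beta) and
# modulates instance-normalized content features, as in StyleGAN-style AdaIN.
if __name__ == '__main__':
    adain = AdaptiveInstanceNorm(in_channel=4, style_dim=4)
    content = torch.rand(4, 4, 4, 4)
    style = torch.rand(4, 4)
    out = adain(content, style)
    assert out.shape == content.shape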
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch
import torch.nn as nn
import torch.sparse
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + (x2 + 8 * x3), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (4 + x2 + 8 * x3), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp24 = tmp22 + tmp23
tmp25 = tmp0 - tmp10
tmp26 = tmp25 * tmp21
tmp27 = tmp24 * tmp26
tmp30 = tmp28 + tmp29
tmp31 = tmp27 + tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 16 * x0), tmp31, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
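# Annotation (reading of the generated code above, not part of it): per
# (batch, channel) row of 16 spatial values, the kernel computes the instance
# mean and rsqrt(var + 1e-05), then gamma * normalized + beta, where gamma
# and beta are the two halves of the style projection (in_ptr1 = style @ W^T,
# in_ptr2 = the linear bias, initialized so gamma starts at 1). call()
# performs the style matmul with extern_kernels.mm before launching it.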
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 8),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf4 = reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_add_mul_0[grid(16)](buf4,
primals_4, buf0, primals_2, buf1, buf5, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf0
del primals_2
return buf5, primals_3, primals_4, buf1, buf4
class AdaptiveInstanceNormNew(nn.Module):
def __init__(self, in_channel, style_dim):
super().__init__()
self.norm = nn.InstanceNorm2d(in_channel)
self.linear = nn.Linear(style_dim, in_channel * 2)
self.linear.weight.data.normal_()
self.linear.bias.data.zero_()
self.linear.bias.data[:in_channel] = 1
self.linear.bias.data[in_channel:] = 0
def forward(self, input_0, input_1):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_4 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| zhengqili/Crowdsampling-the-Plenoptic-Function | AdaptiveInstanceNorm | false | 16,807 | [
"MIT"
] | 70 | 3164e9f9574d597690f83dfdfb34cc470d2dcb88 | https://github.com/zhengqili/Crowdsampling-the-Plenoptic-Function/tree/3164e9f9574d597690f83dfdfb34cc470d2dcb88 |
CIoULoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/jj/cjjvrms24nnpnfjumcqaehaxxlqb5ybo2yzkt25zxjbawf6fhxb4.py
# Topologically Sorted Source Nodes: [add_6, add_7, sub_11, pow_3, left, add_8, add_9, sub_12, pow_4, right, rho2, pow_1, pow_2, add_2, c2, truediv_5, w2, sub_10, h2, truediv_3, atan, w1, sub_8, h1, truediv_4, atan_1, sub_13, pow_5, v, pow_6, sub_1, sub_2, ap, sub_3, sub_4, ag, add, overlap, sub_5, union, ious, sub_14, add_11, truediv_6, add_12, cious, loss], Original ATen: [aten.add, aten.sub, aten.pow, aten.div, aten.atan, aten.mul, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_11 => add_11
# add_12 => add_12
# add_2 => add_2
# add_6 => add_6
# add_7 => add_7
# add_8 => add_8
# add_9 => add_9
# ag => mul_2
# ap => mul_1
# atan => atan
# atan_1 => atan_1
# c2 => add_3
# cious => sub_15
# h1 => add_4
# h2 => add_5
# ious => div
# left => div_1
# loss => sub_16
# overlap => mul
# pow_1 => pow_1
# pow_2 => pow_2
# pow_3 => pow_3
# pow_4 => pow_4
# pow_5 => pow_5
# pow_6 => pow_6
# rho2 => add_10
# right => div_2
# sub_1 => sub_1
# sub_10 => sub_10
# sub_11 => sub_11
# sub_12 => sub_12
# sub_13 => sub_13
# sub_14 => sub_14
# sub_2 => sub_2
# sub_3 => sub_3
# sub_4 => sub_4
# sub_5 => sub_5
# sub_8 => sub_8
# truediv_3 => div_3
# truediv_4 => div_4
# truediv_5 => div_5
# truediv_6 => div_6
# union => add_1
# v => mul_3
# w1 => sub_7
# w2 => sub_9
# Graph fragment:
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_16, %select_18), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_12, %select_14), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_6, %add_7), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_11, 2), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_3, 4), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_17, %select_19), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_13, %select_15), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_8, %add_9), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_12, 2), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_4, 4), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_1, %div_2), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_10, 2), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_11, 2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 1e-06), kwargs = {})
# %div_5 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_10, %add_3), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_18, %select_16), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_19, %select_17), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_10, 1e-06), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_9, %add_5), kwargs = {})
# %atan : [num_users=1] = call_function[target=torch.ops.aten.atan.default](args = (%div_3,), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_14, %select_12), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_15, %select_13), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_8, 1e-06), kwargs = {})
# %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_7, %add_4), kwargs = {})
# %atan_1 : [num_users=1] = call_function[target=torch.ops.aten.atan.default](args = (%div_4,), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%atan, %atan_1), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_13, 2), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_5, 0.4052847345693511), kwargs = {})
# %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul_3, 2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_2, %select_3), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_4, %select_5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %sub_2), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_6, %select_7), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_8, %select_9), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %sub_4), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, %select_1), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mul), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_5, 1e-06), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add_1), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_14, %mul_3), kwargs = {})
# %div_6 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_6, %add_11), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_5, %div_6), kwargs = {})
# %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %add_12), kwargs = {})
# %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sub_15), kwargs = {})
triton_poi_fused_add_atan_div_mul_pow_rsub_sub_0 = async_compile.triton('triton_poi_fused_add_atan_div_mul_pow_rsub_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_atan_div_mul_pow_rsub_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_atan_div_mul_pow_rsub_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask)
tmp4 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask)
tmp10 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp13 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask)
tmp14 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = 0.25
tmp9 = tmp7 * tmp8
tmp12 = tmp10 + tmp11
tmp15 = tmp13 + tmp14
tmp16 = tmp12 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp17 * tmp8
tmp19 = tmp9 + tmp18
tmp20 = triton_helpers.maximum(tmp4, tmp1)
tmp21 = triton_helpers.minimum(tmp3, tmp0)
tmp22 = tmp20 - tmp21
tmp23 = 0.0
tmp24 = triton_helpers.maximum(tmp22, tmp23)
tmp25 = tmp24 * tmp24
tmp26 = triton_helpers.maximum(tmp14, tmp11)
tmp27 = triton_helpers.minimum(tmp13, tmp10)
tmp28 = tmp26 - tmp27
tmp29 = triton_helpers.maximum(tmp28, tmp23)
tmp30 = tmp29 * tmp29
tmp31 = tmp25 + tmp30
tmp32 = 1e-06
tmp33 = tmp31 + tmp32
tmp34 = tmp19 / tmp33
tmp35 = tmp1 - tmp0
tmp36 = tmp11 - tmp10
tmp37 = tmp36 + tmp32
tmp38 = tmp35 / tmp37
tmp39 = libdevice.atan(tmp38)
tmp40 = tmp4 - tmp3
tmp41 = tmp14 - tmp13
tmp42 = tmp41 + tmp32
tmp43 = tmp40 / tmp42
tmp44 = libdevice.atan(tmp43)
tmp45 = tmp39 - tmp44
tmp46 = tmp45 * tmp45
tmp47 = 0.4052847345693511
tmp48 = tmp46 * tmp47
tmp49 = triton_helpers.minimum(tmp4, tmp1)
tmp50 = triton_helpers.maximum(tmp3, tmp0)
tmp51 = tmp49 - tmp50
tmp52 = triton_helpers.maximum(tmp51, tmp23)
tmp53 = triton_helpers.minimum(tmp14, tmp11)
tmp54 = triton_helpers.maximum(tmp13, tmp10)
tmp55 = tmp53 - tmp54
tmp56 = triton_helpers.maximum(tmp55, tmp23)
tmp57 = tmp52 * tmp56
tmp58 = tmp40 * tmp41
tmp59 = tmp35 * tmp36
tmp60 = tmp58 + tmp59
tmp61 = tmp60 - tmp57
tmp62 = tmp61 + tmp32
tmp63 = tmp57 / tmp62
tmp64 = tmp48 * tmp48
tmp65 = 1.0
tmp66 = tmp65 - tmp63
tmp67 = tmp66 + tmp48
tmp68 = tmp64 / tmp67
tmp69 = tmp34 + tmp68
tmp70 = tmp63 - tmp69
tmp71 = tmp65 - tmp70
tl.store(in_out_ptr0 + (x2), tmp71, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf4 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [add_6, add_7, sub_11, pow_3, left, add_8, add_9, sub_12, pow_4, right, rho2, pow_1, pow_2, add_2, c2, truediv_5, w2, sub_10, h2, truediv_3, atan, w1, sub_8, h1, truediv_4, atan_1, sub_13, pow_5, v, pow_6, sub_1, sub_2, ap, sub_3, sub_4, ag, add, overlap, sub_5, union, ious, sub_14, add_11, truediv_6, add_12, cious, loss], Original ATen: [aten.add, aten.sub, aten.pow, aten.div, aten.atan, aten.mul, aten.rsub]
stream0 = get_raw_stream(0)
triton_poi_fused_add_atan_div_mul_pow_rsub_sub_0.run(buf4, arg1_1, arg0_1, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
def ciou(pred, target, eps=1e-07):
lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]
ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
union = ap + ag - overlap + eps
ious = overlap / union
enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
cw = enclose_wh[:, 0]
ch = enclose_wh[:, 1]
c2 = cw ** 2 + ch ** 2 + eps
b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
b2_x1, b2_y1 = target[:, 0], target[:, 1]
b2_x2, b2_y2 = target[:, 2], target[:, 3]
    # only the aspect-ratio denominators (h1, h2) get eps, keeping w/h finite
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
left = (b2_x1 + b2_x2 - (b1_x1 + b1_x2)) ** 2 / 4
right = (b2_y1 + b2_y2 - (b1_y1 + b1_y2)) ** 2 / 4
rho2 = left + right
factor = 4 / math.pi ** 2
v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
cious = ious - (rho2 / c2 + v ** 2 / (1 - ious + v))
return cious
def ciou_loss(pred, target, eps=1e-07):
    """Implementation of the paper `Enhancing Geometric Factors into
    Model Learning and Inference for Object Detection and Instance
    Segmentation <https://arxiv.org/abs/2005.03572>`_.
Code is modified from https://github.com/Zzh-tju/CIoU.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Small constant that keeps divisions finite.
    Returns:
Tensor: Loss tensor.
"""
cious = ciou(pred, target, eps)
loss = 1 - cious
return loss
class CIoULoss(nn.Module):
def __init__(self, eps=1e-06):
super(CIoULoss, self).__init__()
self.eps = eps
def forward(self, pred, target):
return ciou_loss(pred, target, self.eps)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
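# --- worked check (illustrative) ---
# The implementation realizes
#   CIoU = IoU - rho^2 / c^2 - v^2 / ((1 - IoU) + v),
#   v = (4 / pi^2) * (atan(w_gt / h_gt) - atan(w / h))^2,
# i.e. the paper's alpha * v with alpha = v / ((1 - IoU) + v). For identical
# boxes rho = 0 and v = 0, so the loss is ~0 up to eps:
if __name__ == '__main__':
    box = torch.tensor([[0.0, 0.0, 1.0, 1.0]])
    assert float(ciou_loss(box, box)) < 1e-05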
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_atan_div_mul_pow_rsub_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = 0.25
tmp9 = tmp7 * tmp8
tmp12 = tmp10 + tmp11
tmp15 = tmp13 + tmp14
tmp16 = tmp12 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp17 * tmp8
tmp19 = tmp9 + tmp18
tmp20 = triton_helpers.maximum(tmp4, tmp1)
tmp21 = triton_helpers.minimum(tmp3, tmp0)
tmp22 = tmp20 - tmp21
tmp23 = 0.0
tmp24 = triton_helpers.maximum(tmp22, tmp23)
tmp25 = tmp24 * tmp24
tmp26 = triton_helpers.maximum(tmp14, tmp11)
tmp27 = triton_helpers.minimum(tmp13, tmp10)
tmp28 = tmp26 - tmp27
tmp29 = triton_helpers.maximum(tmp28, tmp23)
tmp30 = tmp29 * tmp29
tmp31 = tmp25 + tmp30
tmp32 = 1e-06
tmp33 = tmp31 + tmp32
tmp34 = tmp19 / tmp33
tmp35 = tmp1 - tmp0
tmp36 = tmp11 - tmp10
tmp37 = tmp36 + tmp32
tmp38 = tmp35 / tmp37
tmp39 = libdevice.atan(tmp38)
tmp40 = tmp4 - tmp3
tmp41 = tmp14 - tmp13
tmp42 = tmp41 + tmp32
tmp43 = tmp40 / tmp42
tmp44 = libdevice.atan(tmp43)
tmp45 = tmp39 - tmp44
tmp46 = tmp45 * tmp45
tmp47 = 0.4052847345693511
tmp48 = tmp46 * tmp47
tmp49 = triton_helpers.minimum(tmp4, tmp1)
tmp50 = triton_helpers.maximum(tmp3, tmp0)
tmp51 = tmp49 - tmp50
tmp52 = triton_helpers.maximum(tmp51, tmp23)
tmp53 = triton_helpers.minimum(tmp14, tmp11)
tmp54 = triton_helpers.maximum(tmp13, tmp10)
tmp55 = tmp53 - tmp54
tmp56 = triton_helpers.maximum(tmp55, tmp23)
tmp57 = tmp52 * tmp56
tmp58 = tmp40 * tmp41
tmp59 = tmp35 * tmp36
tmp60 = tmp58 + tmp59
tmp61 = tmp60 - tmp57
tmp62 = tmp61 + tmp32
tmp63 = tmp57 / tmp62
tmp64 = tmp48 * tmp48
tmp65 = 1.0
tmp66 = tmp65 - tmp63
tmp67 = tmp66 + tmp48
tmp68 = tmp64 / tmp67
tmp69 = tmp34 + tmp68
tmp70 = tmp63 - tmp69
tmp71 = tmp65 - tmp70
tl.store(in_out_ptr0 + x2, tmp71, xmask)
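# Annotation (reading of the generated code above, not part of it): the
# single fused kernel evaluates the whole CIoU loss elementwise for 64
# (batch x spatial) box pairs -- the center-distance term rho^2 / c^2, the
# aspect-ratio term v (0.4052847345693511 = 4 / pi^2), the IoU from the
# clamped intersection, and finally loss = 1 - cious.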
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf4 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_atan_div_mul_pow_rsub_sub_0[grid(64)](buf4,
arg1_1, arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf4,
def ciou(pred, target, eps=1e-07):
lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]
ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
union = ap + ag - overlap + eps
ious = overlap / union
enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
cw = enclose_wh[:, 0]
ch = enclose_wh[:, 1]
c2 = cw ** 2 + ch ** 2 + eps
b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
b2_x1, b2_y1 = target[:, 0], target[:, 1]
b2_x2, b2_y2 = target[:, 2], target[:, 3]
    # only the aspect-ratio denominators (h1, h2) get eps, keeping w/h finite
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
left = (b2_x1 + b2_x2 - (b1_x1 + b1_x2)) ** 2 / 4
right = (b2_y1 + b2_y2 - (b1_y1 + b1_y2)) ** 2 / 4
rho2 = left + right
factor = 4 / math.pi ** 2
v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
cious = ious - (rho2 / c2 + v ** 2 / (1 - ious + v))
return cious
def ciou_loss(pred, target, eps=1e-07):
    """Implementation of the paper `Enhancing Geometric Factors into
    Model Learning and Inference for Object Detection and Instance
    Segmentation <https://arxiv.org/abs/2005.03572>`_.
Code is modified from https://github.com/Zzh-tju/CIoU.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Small constant that keeps divisions finite.
    Returns:
Tensor: Loss tensor.
"""
cious = ciou(pred, target, eps)
loss = 1 - cious
return loss
class CIoULossNew(nn.Module):
def __init__(self, eps=1e-06):
super(CIoULossNew, self).__init__()
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| zhangzhengde0225/SwinTrack | CIoULoss | false | 16,808 | [
"MIT"
] | 143 | 526be17f8ef266cb924c6939bd8dda23e9b73249 | https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249 |
LWS | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3m/c3mn2opkdbkjacwm6aj3r3ra3ikm226my4fcuo5efnexca6txvpk.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mul, aten.view]
# Source node to ATen node mapping:
# x_1 => mul, view_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %primals_4), kwargs = {})
# %view_3 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%view_2, [4, 4, 4, 4]), kwargs = {})
triton_poi_fused_mul_view_0 = async_compile.triton('triton_poi_fused_mul_view_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_view_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_view_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + (x4), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mul, aten.view]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_view_0.run(buf2, buf0, primals_4, 256, grid=grid(256), stream=stream0)
del primals_4
return (buf2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LWS(nn.Module):
def __init__(self, num_features, num_classes, bias=True):
super(LWS, self).__init__()
self.fc = nn.Linear(num_features, num_classes, bias=bias)
self.scales = nn.Parameter(torch.ones(num_classes))
for param_name, param in self.fc.named_parameters():
param.requires_grad = False
def forward(self, x):
x = self.fc(x)
x *= self.scales
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4, 'num_classes': 4}]
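# --- usage sketch (illustrative) ---
# LWS (learnable weight scaling, as in decoupled long-tailed classification)
# freezes the linear classifier and learns only one scale per class, so an
# optimizer should be given just `scales`:
if __name__ == '__main__':
    head = LWS(num_features=4, num_classes=4)
    trainable = [p for p in head.parameters() if p.requires_grad]
    assert len(trainable) == 1 and trainable[0] is head.scales
    logits = head(torch.rand(4, 4, 4, 4))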
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_view_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x4, tmp2, xmask)
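# Annotation (reading of the generated code above, not part of it): the
# kernel multiplies the addmm output by the per-class scales in place
# (x0 indexes the class, broadcast over the 64 leading positions); the frozen
# linear layer itself runs as extern_kernels.addmm in call().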
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_mul_view_0[grid(256)](buf2, buf0, primals_4, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
return buf2, buf0
class LWSNew(nn.Module):
def __init__(self, num_features, num_classes, bias=True):
super(LWSNew, self).__init__()
self.fc = nn.Linear(num_features, num_classes, bias=bias)
self.scales = nn.Parameter(torch.ones(num_classes))
for param_name, param in self.fc.named_parameters():
param.requires_grad = False
def forward(self, input_0):
primals_2 = self.scales
primals_1 = self.fc.weight
primals_4 = self.fc.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| zhangyongshun/BagofTricks-LT | LWS | false | 16,809 | [
"MIT"
] | 115 | aec4d9a552236c32231374b7b00fa5bf4208dae3 | https://github.com/zhangyongshun/BagofTricks-LT/tree/aec4d9a552236c32231374b7b00fa5bf4208dae3 |
RNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# combined => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, buf0, reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
return (buf2, buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.autograd import Variable
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
self.i2o = nn.Linear(input_size + hidden_size, output_size)
def forward(self, input, hidden):
combined = torch.cat((input, hidden), 1)
hidden = self.i2h(combined)
output = self.i2o(combined)
return output, hidden
    def initHidden(self, batch_size):
        # NOTE: both branches return a CPU tensor; move the hidden state to
        # the GPU explicitly (e.g. .cuda()) if that was the intent.
        use_gpu = torch.cuda.is_available()
        if use_gpu:
            return Variable(torch.zeros(batch_size, self.hidden_size))
        else:
            return Variable(torch.zeros(batch_size, self.hidden_size))
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
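# --- usage sketch (illustrative; the 3-step sequence is an assumption) ---
# This is a classic Elman-style cell: the caller loops over time steps and
# feeds the returned hidden state back in.
if __name__ == '__main__':
    rnn = RNN(input_size=4, hidden_size=4, output_size=4)
    hidden = rnn.initHidden(batch_size=4)
    seq = torch.rand(3, 4, 4)  # (time, batch, input_size)
    for t in range(seq.size(0)):
        output, hidden = rnn(seq[t], hidden)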
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
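# Annotation (reading of the generated code above, not part of it): the
# kernel fuses torch.cat((input, hidden), 1) into one pass over the (4, 8)
# buffer; the i2h and i2o projections then run as two extern_kernels.addmm
# calls on that shared buffer in call().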
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, buf0, reinterpret_tensor(primals_5,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
return buf2, buf1, buf0
class RNNNew(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(RNNNew, self).__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
self.i2o = nn.Linear(input_size + hidden_size, output_size)
    def initHidden(self, batch_size):
        # NOTE: both branches return a CPU tensor; move the hidden state to
        # the GPU explicitly (e.g. .cuda()) if that was the intent.
        use_gpu = torch.cuda.is_available()
        if use_gpu:
            return Variable(torch.zeros(batch_size, self.hidden_size))
        else:
            return Variable(torch.zeros(batch_size, self.hidden_size))
def forward(self, input_0, input_1):
primals_3 = self.i2h.weight
primals_4 = self.i2h.bias
primals_5 = self.i2o.weight
primals_6 = self.i2o.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
| zhiyongc/Graph_Convolutional_LSTM | RNN | false | 16,810 | [
"MIT"
] | 281 | a703b63e626b1e2563fe3f45d9714e468b1d4a0e | https://github.com/zhiyongc/Graph_Convolutional_LSTM/tree/a703b63e626b1e2563fe3f45d9714e468b1d4a0e |
CosineClassifier | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/zk/czk5xfokmwnuegxn53eciq25366p2is3a6lxx47tlosf3q225vha.py
# Topologically Sorted Source Nodes: [x_in], Original ATen: [aten.div]
# Source node to ATen node mapping:
# x_in => div
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5p/c5pxebnz5awyqutoxsep2yikn2bm4pe5hn4le36ggdlffaemj74b.py
# Topologically Sorted Source Nodes: [x_out_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# x_out_1 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, %view), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 * tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_in], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.div]
triton_poi_fused_div_0.run(primals_2, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight, x_out], Original ATen: [aten.div, aten.mm]
extern_kernels.mm(buf0, buf1, out=buf2)
del buf1
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_out_1], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(buf3, primals_3, 16, grid=grid(16), stream=stream0)
return (buf3, primals_2, primals_3, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
def cosine_fully_connected_layer(x_in, weight, scale=None, bias=None,
normalize_x=True, normalize_w=True):
assert x_in.dim() == 2
assert weight.dim() == 2
assert x_in.size(1) == weight.size(0)
if normalize_x:
x_in = F.normalize(x_in, p=2, dim=1, eps=1e-12)
if normalize_w:
weight = F.normalize(weight, p=2, dim=0, eps=1e-12)
x_out = torch.mm(x_in, weight)
if scale is not None:
x_out = x_out * scale.view(1, -1)
if bias is not None:
x_out = x_out + bias.view(1, -1)
return x_out
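# Hedged check (helper name assumed, not from the repo): with both
# normalization flags on and no scale/bias, the layer reduces to pairwise
# cosine similarity between input rows and weight columns.
def _cosine_equivalence_check():
    x, w = torch.rand(3, 4), torch.rand(4, 5)
    out = cosine_fully_connected_layer(x, w)
    ref = F.normalize(x, p=2, dim=1) @ F.normalize(w, p=2, dim=0)
    assert torch.allclose(out, ref, atol=1e-6)
    return out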
class CosineClassifier(nn.Module):
def __init__(self, num_channels, num_classes, scale=1.0, learn_scale=
False, bias=False, normalize_x=True, normalize_w=True):
super().__init__()
self.num_channels = num_channels
self.num_classes = num_classes
self.normalize_x = normalize_x
self.normalize_w = normalize_w
weight = torch.FloatTensor(num_classes, num_channels).normal_(0.0,
np.sqrt(2.0 / num_channels))
self.weight = nn.Parameter(weight, requires_grad=True)
if bias:
bias = torch.FloatTensor(num_classes).fill_(0.0)
self.bias = nn.Parameter(bias, requires_grad=True)
else:
self.bias = None
scale_cls = torch.FloatTensor(1).fill_(scale)
self.scale_cls = nn.Parameter(scale_cls, requires_grad=learn_scale)
def forward(self, x_in):
assert x_in.dim() == 2
return cosine_fully_connected_layer(x_in, self.weight.t(), scale=
self.scale_cls, bias=self.bias, normalize_x=self.normalize_x,
normalize_w=self.normalize_w)
def extra_repr(self):
learnable = self.scale_cls.requires_grad
s = (
f'num_channels={self.num_channels}, num_classes={self.num_classes}, scale_cls={self.scale_cls.item()} (learnable={learnable}), normalize_x={self.normalize_x}, normalize_w={self.normalize_w}'
)
if self.bias is None:
s += ', bias=False'
return s
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4, 'num_classes': 4}]
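# Hedged usage sketch (values assumed): cosine scores lie in [-1, 1], so the
# scaled logits are bounded by |scale|.
def _cosine_classifier_demo():
    clf = CosineClassifier(num_channels=4, num_classes=4, scale=10.0)
    logits = clf(torch.rand(8, 4))
    assert logits.shape == (8, 4)
    assert logits.abs().max() <= 10.0 + 1e-5
    return logits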
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
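# Note (inferred from the loads above): this kernel hard-codes rows of length
# 4, computing max(||row||_2, 1e-12) per row and dividing each element by it,
# i.e. the fused counterpart of F.normalize(x, p=2, dim=1, eps=1e-12) below.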
@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 * tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
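# Note: this kernel broadcasts the single-element scale_cls tensor across all
# 16 outputs, the fused form of x_out * scale.view(1, -1) in the eager path.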
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_0[grid(16)](primals_2, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, buf1, out=buf2)
del buf1
buf3 = buf2
del buf2
triton_poi_fused_mul_1[grid(16)](buf3, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
return buf3, primals_2, primals_3, buf0
def cosine_fully_connected_layer(x_in, weight, scale=None, bias=None,
normalize_x=True, normalize_w=True):
assert x_in.dim() == 2
assert weight.dim() == 2
assert x_in.size(1) == weight.size(0)
if normalize_x:
x_in = F.normalize(x_in, p=2, dim=1, eps=1e-12)
if normalize_w:
weight = F.normalize(weight, p=2, dim=0, eps=1e-12)
x_out = torch.mm(x_in, weight)
if scale is not None:
x_out = x_out * scale.view(1, -1)
if bias is not None:
x_out = x_out + bias.view(1, -1)
return x_out
class CosineClassifierNew(nn.Module):
def __init__(self, num_channels, num_classes, scale=1.0, learn_scale=
False, bias=False, normalize_x=True, normalize_w=True):
super().__init__()
self.num_channels = num_channels
self.num_classes = num_classes
self.normalize_x = normalize_x
self.normalize_w = normalize_w
weight = torch.FloatTensor(num_classes, num_channels).normal_(0.0,
np.sqrt(2.0 / num_channels))
self.weight = nn.Parameter(weight, requires_grad=True)
if bias:
bias = torch.FloatTensor(num_classes).fill_(0.0)
self.bias = nn.Parameter(bias, requires_grad=True)
else:
self.bias = None
scale_cls = torch.FloatTensor(1).fill_(scale)
self.scale_cls = nn.Parameter(scale_cls, requires_grad=learn_scale)
def extra_repr(self):
learnable = self.scale_cls.requires_grad
s = (
f'num_channels={self.num_channels}, num_classes={self.num_classes}, scale_cls={self.scale_cls.item()} (learnable={learnable}), normalize_x={self.normalize_x}, normalize_w={self.normalize_w}'
)
if self.bias is None:
s += ', bias=False'
return s
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.scale_cls
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| zheang01/FACT | CosineClassifier | false | 16,811 | [
"MIT"
] | 65 | a877cc86acc4d29fb7589c8ac571c8aef09e5fd8 | https://github.com/zheang01/FACT/tree/a877cc86acc4d29fb7589c8ac571c8aef09e5fd8 |
GIoULoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/bh/cbhvjbbhq7diklnxyyrpjlul7tu4x3hq7ybfpkuukjhsejqgewma.py
# Topologically Sorted Source Nodes: [sub, sub_1, area1, sub_2, sub_3, area2, add, overlap, union, eps, union_1, ious, enclose_area, enclose_area_1, sub_7, truediv_1, gious, loss], Original ATen: [aten.sub, aten.mul, aten.add, aten.lift_fresh, aten.maximum, aten.div, aten.rsub]
# Source node to ATen node mapping:
# add => add
# area1 => mul
# area2 => mul_1
# enclose_area => mul_3
# enclose_area_1 => maximum_3
# eps => full_default
# gious => sub_8
# ious => div
# loss => sub_9
# overlap => mul_2
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# sub_3 => sub_3
# sub_7 => sub_7
# truediv_1 => div_1
# union => sub_5
# union_1 => maximum_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select, %select_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_2, %select_3), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sub_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_4, %select_5), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_6, %select_7), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %sub_3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_8, %select_9), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mul_2), kwargs = {})
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([1], 9.999999974752427e-07), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum_2 : [num_users=2] = call_function[target=torch.ops.aten.maximum.default](args = (%sub_5, %full_default), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_2, %maximum_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_10, %select_11), kwargs = {})
# %maximum_3 : [num_users=2] = call_function[target=torch.ops.aten.maximum.default](args = (%mul_3, %full_default), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%maximum_3, %maximum_2), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_7, %maximum_3), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %div_1), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sub_8), kwargs = {})
triton_poi_fused_add_div_lift_fresh_maximum_mul_rsub_sub_0 = async_compile.triton('triton_poi_fused_add_div_lift_fresh_maximum_mul_rsub_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_lift_fresh_maximum_mul_rsub_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_lift_fresh_maximum_mul_rsub_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.minimum(tmp0, tmp1)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp2 - tmp5
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp11 - tmp14
tmp16 = triton_helpers.maximum(tmp15, tmp7)
tmp17 = tmp8 * tmp16
tmp18 = tmp0 - tmp3
tmp19 = tmp9 - tmp12
tmp20 = tmp18 * tmp19
tmp21 = tmp1 - tmp4
tmp22 = tmp10 - tmp13
tmp23 = tmp21 * tmp22
tmp24 = tmp20 + tmp23
tmp25 = tmp24 - tmp17
tmp26 = triton_helpers.maximum(tmp0, tmp1)
tmp27 = triton_helpers.minimum(tmp3, tmp4)
tmp28 = tmp26 - tmp27
tmp29 = triton_helpers.maximum(tmp28, tmp7)
tmp30 = triton_helpers.maximum(tmp9, tmp10)
tmp31 = triton_helpers.minimum(tmp12, tmp13)
tmp32 = tmp30 - tmp31
tmp33 = triton_helpers.maximum(tmp32, tmp7)
tmp34 = tmp29 * tmp33
tmp35 = 9.999999974752427e-07
tmp36 = triton_helpers.maximum(tmp34, tmp35)
tmp37 = triton_helpers.maximum(tmp25, tmp35)
tmp38 = tmp17 / tmp37
tmp39 = tmp36 - tmp37
tmp40 = tmp39 / tmp36
tmp41 = tmp38 - tmp40
tmp42 = 1.0
tmp43 = tmp42 - tmp41
tl.store(in_out_ptr0 + (x0), tmp43, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sub, sub_1, area1, sub_2, sub_3, area2, add, overlap, union, eps, union_1, ious, enclose_area, enclose_area_1, sub_7, truediv_1, gious, loss], Original ATen: [aten.sub, aten.mul, aten.add, aten.lift_fresh, aten.maximum, aten.div, aten.rsub]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_lift_fresh_maximum_mul_rsub_sub_0.run(buf3, arg0_1, arg1_1, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.distributed
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
def fp16_clamp(x, min=None, max=None):
if not x.is_cuda and x.dtype == torch.float16:
return x.float().clamp(min, max).half()
return x.clamp(min, max)
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-06):
"""Calculate overlap between two set of bboxes.
FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889
Note:
        Assume bboxes1 is M x 4 and bboxes2 is N x 4. When mode is 'iou',
        some new variables are generated when calculating IoU
        using the bbox_overlaps function:
1) is_aligned is False
area1: M x 1
area2: N x 1
lt: M x N x 2
rb: M x N x 2
wh: M x N x 2
overlap: M x N x 1
union: M x N x 1
ious: M x N x 1
Total memory:
S = (9 x N x M + N + M) * 4 Byte,
When using FP16, we can reduce:
R = (9 x N x M + N + M) * 4 / 2 Byte
        R larger than (N + M) * 4 * 2 always holds when N and M >= 1.
        Obviously, N + M <= N * M < 3 * N * M when N >= 2 and M >= 2, and
        N + 1 < 3 * N when N or M is 1.
Given M = 40 (ground truth), N = 400000 (three anchor boxes
in per grid, FPN, R-CNNs),
        R = 275 MB (one pass)
A special case (dense detection), M = 512 (ground truth),
R = 3516 MB = 3.43 GB
When the batch size is B, reduce:
B x R
Therefore, CUDA memory runs out frequently.
Experiments on GeForce RTX 2080Ti (11019 MiB):
| dtype | M | N | Use | Real | Ideal |
|:----:|:----:|:----:|:----:|:----:|:----:|
| FP32 | 512 | 400000 | 8020 MiB | -- | -- |
| FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB |
| FP32 | 40 | 400000 | 1540 MiB | -- | -- |
| FP16 | 40 | 400000 | 1264 MiB | 276MiB | 275 MiB |
2) is_aligned is True
area1: N x 1
area2: N x 1
lt: N x 2
rb: N x 2
wh: N x 2
overlap: N x 1
union: N x 1
ious: N x 1
Total memory:
S = 11 x N * 4 Byte
When using FP16, we can reduce:
R = 11 x N * 4 / 2 Byte
        The same applies to 'giou' (larger than 'iou').
Time-wise, FP16 is generally faster than FP32.
        When gpu_assign_thr is not -1, it takes more time on the CPU
        but does not reduce memory.
        Therefore, we can halve the memory while keeping the speed.
    If ``is_aligned`` is ``False``, then calculate the overlaps between each
    bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
    pair of bboxes1 and bboxes2.
Args:
bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
B indicates the batch dim, in shape (B1, B2, ..., Bn).
If ``is_aligned `` is ``True``, then m and n must be equal.
mode (str): "iou" (intersection over union), "iof" (intersection over
foreground) or "giou" (generalized intersection over union).
Default "iou".
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
eps (float, optional): A value added to the denominator for numerical
stability. Default 1e-6.
Returns:
Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)
Example:
>>> bboxes1 = torch.FloatTensor([
>>> [0, 0, 10, 10],
>>> [10, 10, 20, 20],
>>> [32, 32, 38, 42],
>>> ])
>>> bboxes2 = torch.FloatTensor([
>>> [0, 0, 10, 20],
>>> [0, 10, 10, 19],
>>> [10, 10, 20, 20],
>>> ])
>>> overlaps = bbox_overlaps(bboxes1, bboxes2)
>>> assert overlaps.shape == (3, 3)
>>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
>>> assert overlaps.shape == (3, )
Example:
>>> empty = torch.empty(0, 4)
>>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
>>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
>>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
>>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
"""
assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
assert bboxes1.size(-1) == 4 or bboxes1.size(0) == 0
assert bboxes2.size(-1) == 4 or bboxes2.size(0) == 0
assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
batch_shape = bboxes1.shape[:-2]
rows = bboxes1.size(-2)
cols = bboxes2.size(-2)
if is_aligned:
assert rows == cols
if rows * cols == 0:
if is_aligned:
return bboxes1.new(batch_shape + (rows,))
else:
return bboxes1.new(batch_shape + (rows, cols))
area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] -
bboxes1[..., 1])
area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] -
bboxes2[..., 1])
if is_aligned:
lt = torch.max(bboxes1[..., :2], bboxes2[..., :2])
rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:])
wh = fp16_clamp(rb - lt, min=0)
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1 + area2 - overlap
else:
union = area1
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
else:
lt = torch.max(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2])
rb = torch.min(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:])
wh = fp16_clamp(rb - lt, min=0)
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1[..., None] + area2[..., None, :] - overlap
else:
union = area1[..., None]
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :, None, :2], bboxes2[...,
None, :, :2])
enclosed_rb = torch.max(bboxes1[..., :, None, 2:], bboxes2[...,
None, :, 2:])
eps = union.new_tensor([eps])
union = torch.max(union, eps)
ious = overlap / union
if mode in ['iou', 'iof']:
return ious
enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0)
enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
enclose_area = torch.max(enclose_area, eps)
gious = ious - (enclose_area - union) / enclose_area
return gious
def giou_loss(pred, target, eps=1e-07):
"""`Generalized Intersection over Union: A Metric and A Loss for Bounding
Box Regression <https://arxiv.org/abs/1902.09630>`_.
Args:
pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Epsilon to avoid division by zero.
Return:
Tensor: Loss tensor.
"""
gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps)
loss = 1 - gious
return loss
class GIoULoss(nn.Module):
def __init__(self, eps=1e-06):
super(GIoULoss, self).__init__()
self.eps = eps
def forward(self, pred, target):
return giou_loss(pred, target, self.eps)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
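# Hedged sanity check (helper name assumed, not from the repo): GIoU loss is
# 0 for identical boxes and approaches 2 as boxes become disjoint and far
# apart, since the enclosing box then dwarfs the union.
def _giou_loss_demo():
    box = torch.tensor([[0.0, 0.0, 1.0, 1.0]])
    assert torch.allclose(giou_loss(box, box), torch.zeros(1), atol=1e-6)
    far = giou_loss(box, box + 100.0)
    assert far.item() > 1.9
    return far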
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.distributed
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_lift_fresh_maximum_mul_rsub_sub_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
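    # Hedged reading (inferred from giou_loss below): tmp17 is the
    # intersection area, tmp25 the union, tmp36/tmp37 the eps-clamped
    # enclosing and union areas, and tmp43 = 1 - GIoU.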
tmp2 = triton_helpers.minimum(tmp0, tmp1)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp2 - tmp5
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp11 - tmp14
tmp16 = triton_helpers.maximum(tmp15, tmp7)
tmp17 = tmp8 * tmp16
tmp18 = tmp0 - tmp3
tmp19 = tmp9 - tmp12
tmp20 = tmp18 * tmp19
tmp21 = tmp1 - tmp4
tmp22 = tmp10 - tmp13
tmp23 = tmp21 * tmp22
tmp24 = tmp20 + tmp23
tmp25 = tmp24 - tmp17
tmp26 = triton_helpers.maximum(tmp0, tmp1)
tmp27 = triton_helpers.minimum(tmp3, tmp4)
tmp28 = tmp26 - tmp27
tmp29 = triton_helpers.maximum(tmp28, tmp7)
tmp30 = triton_helpers.maximum(tmp9, tmp10)
tmp31 = triton_helpers.minimum(tmp12, tmp13)
tmp32 = tmp30 - tmp31
tmp33 = triton_helpers.maximum(tmp32, tmp7)
tmp34 = tmp29 * tmp33
tmp35 = 9.999999974752427e-07
tmp36 = triton_helpers.maximum(tmp34, tmp35)
tmp37 = triton_helpers.maximum(tmp25, tmp35)
tmp38 = tmp17 / tmp37
tmp39 = tmp36 - tmp37
tmp40 = tmp39 / tmp36
tmp41 = tmp38 - tmp40
tmp42 = 1.0
tmp43 = tmp42 - tmp41
tl.store(in_out_ptr0 + x0, tmp43, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf3 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_div_lift_fresh_maximum_mul_rsub_sub_0[grid(64)](
buf3, arg0_1, arg1_1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf3,
def fp16_clamp(x, min=None, max=None):
if not x.is_cuda and x.dtype == torch.float16:
return x.float().clamp(min, max).half()
return x.clamp(min, max)
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-06):
"""Calculate overlap between two set of bboxes.
FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889
Note:
        Assume bboxes1 is M x 4 and bboxes2 is N x 4. When mode is 'iou',
        some new variables are generated when calculating IoU
        using the bbox_overlaps function:
1) is_aligned is False
area1: M x 1
area2: N x 1
lt: M x N x 2
rb: M x N x 2
wh: M x N x 2
overlap: M x N x 1
union: M x N x 1
ious: M x N x 1
Total memory:
S = (9 x N x M + N + M) * 4 Byte,
When using FP16, we can reduce:
R = (9 x N x M + N + M) * 4 / 2 Byte
        R larger than (N + M) * 4 * 2 always holds when N and M >= 1.
        Obviously, N + M <= N * M < 3 * N * M when N >= 2 and M >= 2, and
        N + 1 < 3 * N when N or M is 1.
Given M = 40 (ground truth), N = 400000 (three anchor boxes
in per grid, FPN, R-CNNs),
        R = 275 MB (one pass)
A special case (dense detection), M = 512 (ground truth),
R = 3516 MB = 3.43 GB
When the batch size is B, reduce:
B x R
Therefore, CUDA memory runs out frequently.
Experiments on GeForce RTX 2080Ti (11019 MiB):
| dtype | M | N | Use | Real | Ideal |
|:----:|:----:|:----:|:----:|:----:|:----:|
| FP32 | 512 | 400000 | 8020 MiB | -- | -- |
| FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB |
| FP32 | 40 | 400000 | 1540 MiB | -- | -- |
| FP16 | 40 | 400000 | 1264 MiB | 276MiB | 275 MiB |
2) is_aligned is True
area1: N x 1
area2: N x 1
lt: N x 2
rb: N x 2
wh: N x 2
overlap: N x 1
union: N x 1
ious: N x 1
Total memory:
S = 11 x N * 4 Byte
When using FP16, we can reduce:
R = 11 x N * 4 / 2 Byte
        The same applies to 'giou' (larger than 'iou').
Time-wise, FP16 is generally faster than FP32.
        When gpu_assign_thr is not -1, it takes more time on the CPU
        but does not reduce memory.
        Therefore, we can halve the memory while keeping the speed.
    If ``is_aligned`` is ``False``, then calculate the overlaps between each
    bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
    pair of bboxes1 and bboxes2.
Args:
bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
B indicates the batch dim, in shape (B1, B2, ..., Bn).
If ``is_aligned `` is ``True``, then m and n must be equal.
mode (str): "iou" (intersection over union), "iof" (intersection over
foreground) or "giou" (generalized intersection over union).
Default "iou".
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
eps (float, optional): A value added to the denominator for numerical
stability. Default 1e-6.
Returns:
Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)
Example:
>>> bboxes1 = torch.FloatTensor([
>>> [0, 0, 10, 10],
>>> [10, 10, 20, 20],
>>> [32, 32, 38, 42],
>>> ])
>>> bboxes2 = torch.FloatTensor([
>>> [0, 0, 10, 20],
>>> [0, 10, 10, 19],
>>> [10, 10, 20, 20],
>>> ])
>>> overlaps = bbox_overlaps(bboxes1, bboxes2)
>>> assert overlaps.shape == (3, 3)
>>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
>>> assert overlaps.shape == (3, )
Example:
>>> empty = torch.empty(0, 4)
>>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
>>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
>>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
>>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
"""
assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
assert bboxes1.size(-1) == 4 or bboxes1.size(0) == 0
assert bboxes2.size(-1) == 4 or bboxes2.size(0) == 0
assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
batch_shape = bboxes1.shape[:-2]
rows = bboxes1.size(-2)
cols = bboxes2.size(-2)
if is_aligned:
assert rows == cols
if rows * cols == 0:
if is_aligned:
return bboxes1.new(batch_shape + (rows,))
else:
return bboxes1.new(batch_shape + (rows, cols))
area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] -
bboxes1[..., 1])
area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] -
bboxes2[..., 1])
if is_aligned:
lt = torch.max(bboxes1[..., :2], bboxes2[..., :2])
rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:])
wh = fp16_clamp(rb - lt, min=0)
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1 + area2 - overlap
else:
union = area1
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
else:
lt = torch.max(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2])
rb = torch.min(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:])
wh = fp16_clamp(rb - lt, min=0)
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1[..., None] + area2[..., None, :] - overlap
else:
union = area1[..., None]
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :, None, :2], bboxes2[...,
None, :, :2])
enclosed_rb = torch.max(bboxes1[..., :, None, 2:], bboxes2[...,
None, :, 2:])
eps = union.new_tensor([eps])
union = torch.max(union, eps)
ious = overlap / union
if mode in ['iou', 'iof']:
return ious
enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0)
enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
enclose_area = torch.max(enclose_area, eps)
gious = ious - (enclose_area - union) / enclose_area
return gious
def giou_loss(pred, target, eps=1e-07):
"""`Generalized Intersection over Union: A Metric and A Loss for Bounding
Box Regression <https://arxiv.org/abs/1902.09630>`_.
Args:
pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Epsilon to avoid division by zero.
Return:
Tensor: Loss tensor.
"""
gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps)
loss = 1 - gious
return loss
class GIoULossNew(nn.Module):
def __init__(self, eps=1e-06):
super(GIoULossNew, self).__init__()
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| zhangzhengde0225/SwinTrack | GIoULoss | false | 16,812 | [
"MIT"
] | 143 | 526be17f8ef266cb924c6939bd8dda23e9b73249 | https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249 |
BoundedIoULoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xp/cxp6gt32tjw5xk3ncwyla2qmz2iltlazcrfhzocm5pizeihvurl4.py
# Topologically Sorted Source Nodes: [stack, lt, mul_8, mul_9, truediv_6, sub_12, loss], Original ATen: [aten.stack, aten.lt, aten.mul, aten.div, aten.sub, aten.where]
# Source node to ATen node mapping:
# loss => where
# lt => lt
# mul_8 => mul_8
# mul_9 => mul_9
# stack => cat
# sub_12 => sub_12
# truediv_6 => div_6
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1, %unsqueeze_2, %unsqueeze_3], -1), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%view, 0.2), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, 0.5), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_8, %view), kwargs = {})
# %div_6 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_9, 0.2), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, 0.1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %div_6, %sub_12), kwargs = {})
triton_poi_fused_div_lt_mul_stack_sub_where_0 = async_compile.triton('triton_poi_fused_div_lt_mul_stack_sub_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_lt_mul_stack_sub_where_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_lt_mul_stack_sub_where_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (32 + x1 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (x1 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = tmp6 + tmp5
tmp9 = 0.5
tmp10 = tmp8 * tmp9
tmp11 = tl.load(in_ptr1 + (x1 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tl.load(in_ptr1 + (32 + x1 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tmp13 * tmp9
tmp15 = tmp10 - tmp14
tmp16 = tl_math.abs(tmp15)
tmp17 = 2.0
tmp18 = tmp16 * tmp17
tmp19 = tmp7 - tmp18
tmp20 = tmp7 + tmp18
tmp21 = 0.001
tmp22 = tmp20 + tmp21
tmp23 = tmp19 / tmp22
tmp24 = 0.0
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = 1.0
tmp27 = tmp26 - tmp25
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp4, tmp27, tmp28)
tmp30 = tmp0 >= tmp3
tmp31 = tl.full([1], 2, tl.int64)
tmp32 = tmp0 < tmp31
tmp33 = tmp30 & tmp32
tmp34 = tl.load(in_ptr0 + (48 + x1 + (64*x2)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
tmp35 = tl.load(in_ptr0 + (16 + x1 + (64*x2)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tmp34 - tmp35
tmp37 = tmp35 + tmp34
tmp38 = tmp37 * tmp9
tmp39 = tl.load(in_ptr1 + (16 + x1 + (64*x2)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
tmp40 = tl.load(in_ptr1 + (48 + x1 + (64*x2)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
tmp41 = tmp39 + tmp40
tmp42 = tmp41 * tmp9
tmp43 = tmp38 - tmp42
tmp44 = tl_math.abs(tmp43)
tmp45 = tmp44 * tmp17
tmp46 = tmp36 - tmp45
tmp47 = tmp36 + tmp45
tmp48 = tmp47 + tmp21
tmp49 = tmp46 / tmp48
tmp50 = triton_helpers.maximum(tmp49, tmp24)
tmp51 = tmp26 - tmp50
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp33, tmp51, tmp52)
tmp54 = tmp0 >= tmp31
tmp55 = tl.full([1], 3, tl.int64)
tmp56 = tmp0 < tmp55
tmp57 = tmp54 & tmp56
tmp58 = tl.load(in_ptr0 + (32 + x1 + (64*x2)), tmp57 & xmask, eviction_policy='evict_last', other=0.0)
tmp59 = tl.load(in_ptr0 + (x1 + (64*x2)), tmp57 & xmask, eviction_policy='evict_last', other=0.0)
tmp60 = tmp58 - tmp59
tmp61 = tl.load(in_ptr1 + (32 + x1 + (64*x2)), tmp57 & xmask, eviction_policy='evict_last', other=0.0)
tmp62 = tl.load(in_ptr1 + (x1 + (64*x2)), tmp57 & xmask, eviction_policy='evict_last', other=0.0)
tmp63 = tmp61 - tmp62
tmp64 = tmp63 + tmp21
tmp65 = tmp60 / tmp64
tmp66 = tmp60 + tmp21
tmp67 = tmp63 / tmp66
tmp68 = triton_helpers.minimum(tmp65, tmp67)
tmp69 = tmp26 - tmp68
tmp70 = tl.full(tmp69.shape, 0.0, tmp69.dtype)
tmp71 = tl.where(tmp57, tmp69, tmp70)
tmp72 = tmp0 >= tmp55
tmp73 = tl.full([1], 4, tl.int64)
tmp74 = tmp0 < tmp73
tmp75 = tl.load(in_ptr0 + (48 + x1 + (64*x2)), tmp72 & xmask, eviction_policy='evict_last', other=0.0)
tmp76 = tl.load(in_ptr0 + (16 + x1 + (64*x2)), tmp72 & xmask, eviction_policy='evict_last', other=0.0)
tmp77 = tmp75 - tmp76
tmp78 = tl.load(in_ptr1 + (48 + x1 + (64*x2)), tmp72 & xmask, eviction_policy='evict_last', other=0.0)
tmp79 = tl.load(in_ptr1 + (16 + x1 + (64*x2)), tmp72 & xmask, eviction_policy='evict_last', other=0.0)
tmp80 = tmp78 - tmp79
tmp81 = tmp80 + tmp21
tmp82 = tmp77 / tmp81
tmp83 = tmp77 + tmp21
tmp84 = tmp80 / tmp83
tmp85 = triton_helpers.minimum(tmp82, tmp84)
tmp86 = tmp26 - tmp85
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp72, tmp86, tmp87)
tmp89 = tl.where(tmp57, tmp71, tmp88)
tmp90 = tl.where(tmp33, tmp53, tmp89)
tmp91 = tl.where(tmp4, tmp29, tmp90)
tmp92 = 0.2
tmp93 = tmp91 < tmp92
tmp94 = tmp91 * tmp9
tmp95 = tmp94 * tmp91
tmp96 = 5.0
tmp97 = tmp95 * tmp96
tmp98 = 0.1
tmp99 = tmp91 - tmp98
tmp100 = tl.where(tmp93, tmp97, tmp99)
tl.store(in_out_ptr0 + (x3), tmp100, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 64), (64, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [stack, lt, mul_8, mul_9, truediv_6, sub_12, loss], Original ATen: [aten.stack, aten.lt, aten.mul, aten.div, aten.sub, aten.where]
stream0 = get_raw_stream(0)
triton_poi_fused_div_lt_mul_stack_sub_where_0.run(buf1, arg1_1, arg0_1, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.distributed
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
def bounded_iou_loss(pred, target, beta=0.2, eps=0.001):
"""BIoULoss.
This is an implementation of paper
`Improving Object Localization with Fitness NMS and Bounded IoU Loss.
<https://arxiv.org/abs/1711.00164>`_.
Args:
pred (torch.Tensor): Predicted bboxes.
target (torch.Tensor): Target bboxes.
beta (float): beta parameter in smoothl1.
eps (float): eps to avoid NaN.
"""
pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5
pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5
pred_w = pred[:, 2] - pred[:, 0]
pred_h = pred[:, 3] - pred[:, 1]
with torch.no_grad():
target_ctrx = (target[:, 0] + target[:, 2]) * 0.5
target_ctry = (target[:, 1] + target[:, 3]) * 0.5
target_w = target[:, 2] - target[:, 0]
target_h = target[:, 3] - target[:, 1]
dx = target_ctrx - pred_ctrx
dy = target_ctry - pred_ctry
loss_dx = 1 - torch.max((target_w - 2 * dx.abs()) / (target_w + 2 * dx.
abs() + eps), torch.zeros_like(dx))
loss_dy = 1 - torch.max((target_h - 2 * dy.abs()) / (target_h + 2 * dy.
abs() + eps), torch.zeros_like(dy))
loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w / (target_w +
eps))
loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h / (target_h +
eps))
loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh], dim=-1).view(
loss_dx.size(0), -1)
loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta,
loss_comb - 0.5 * beta)
return loss
class BoundedIoULoss(nn.Module):
def __init__(self, beta=0.2, eps=0.001):
super(BoundedIoULoss, self).__init__()
self.beta = beta
self.eps = eps
def forward(self, pred, target):
return bounded_iou_loss(pred, target, self.beta, self.eps)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
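# Hedged illustration (helper name assumed): the final torch.where applies a
# smooth-L1 transform with threshold beta, continuous at x == beta.
def _smooth_l1_branch_demo(beta=0.2):
    x = torch.tensor([0.1, 0.2, 1.0])
    y = torch.where(x < beta, 0.5 * x * x / beta, x - 0.5 * beta)
    # -> [0.0250, 0.1000, 0.9000]; both branches give 0.1 at x == beta
    return y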
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.distributed
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_lt_mul_stack_sub_where_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 16
x2 = xindex // 64
x3 = xindex
tmp0 = x0
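    # Hedged reading (inferred from bounded_iou_loss below): x0 in {0,1,2,3}
    # selects the stacked loss_dx/loss_dy/loss_dw/loss_dh terms; tmp92-tmp100
    # apply the smooth-L1 transform with beta = 0.2 (hence 1/beta = 5.0).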
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (32 + x1 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (x1 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = tmp6 + tmp5
tmp9 = 0.5
tmp10 = tmp8 * tmp9
tmp11 = tl.load(in_ptr1 + (x1 + 64 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp12 = tl.load(in_ptr1 + (32 + x1 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tmp13 * tmp9
tmp15 = tmp10 - tmp14
tmp16 = tl_math.abs(tmp15)
tmp17 = 2.0
tmp18 = tmp16 * tmp17
tmp19 = tmp7 - tmp18
tmp20 = tmp7 + tmp18
tmp21 = 0.001
tmp22 = tmp20 + tmp21
tmp23 = tmp19 / tmp22
tmp24 = 0.0
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = 1.0
tmp27 = tmp26 - tmp25
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp4, tmp27, tmp28)
tmp30 = tmp0 >= tmp3
tmp31 = tl.full([1], 2, tl.int64)
tmp32 = tmp0 < tmp31
tmp33 = tmp30 & tmp32
tmp34 = tl.load(in_ptr0 + (48 + x1 + 64 * x2), tmp33 & xmask,
eviction_policy='evict_last', other=0.0)
tmp35 = tl.load(in_ptr0 + (16 + x1 + 64 * x2), tmp33 & xmask,
eviction_policy='evict_last', other=0.0)
tmp36 = tmp34 - tmp35
tmp37 = tmp35 + tmp34
tmp38 = tmp37 * tmp9
tmp39 = tl.load(in_ptr1 + (16 + x1 + 64 * x2), tmp33 & xmask,
eviction_policy='evict_last', other=0.0)
tmp40 = tl.load(in_ptr1 + (48 + x1 + 64 * x2), tmp33 & xmask,
eviction_policy='evict_last', other=0.0)
tmp41 = tmp39 + tmp40
tmp42 = tmp41 * tmp9
tmp43 = tmp38 - tmp42
tmp44 = tl_math.abs(tmp43)
tmp45 = tmp44 * tmp17
tmp46 = tmp36 - tmp45
tmp47 = tmp36 + tmp45
tmp48 = tmp47 + tmp21
tmp49 = tmp46 / tmp48
tmp50 = triton_helpers.maximum(tmp49, tmp24)
tmp51 = tmp26 - tmp50
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp33, tmp51, tmp52)
tmp54 = tmp0 >= tmp31
tmp55 = tl.full([1], 3, tl.int64)
tmp56 = tmp0 < tmp55
tmp57 = tmp54 & tmp56
tmp58 = tl.load(in_ptr0 + (32 + x1 + 64 * x2), tmp57 & xmask,
eviction_policy='evict_last', other=0.0)
tmp59 = tl.load(in_ptr0 + (x1 + 64 * x2), tmp57 & xmask,
eviction_policy='evict_last', other=0.0)
tmp60 = tmp58 - tmp59
tmp61 = tl.load(in_ptr1 + (32 + x1 + 64 * x2), tmp57 & xmask,
eviction_policy='evict_last', other=0.0)
tmp62 = tl.load(in_ptr1 + (x1 + 64 * x2), tmp57 & xmask,
eviction_policy='evict_last', other=0.0)
tmp63 = tmp61 - tmp62
tmp64 = tmp63 + tmp21
tmp65 = tmp60 / tmp64
tmp66 = tmp60 + tmp21
tmp67 = tmp63 / tmp66
tmp68 = triton_helpers.minimum(tmp65, tmp67)
tmp69 = tmp26 - tmp68
tmp70 = tl.full(tmp69.shape, 0.0, tmp69.dtype)
tmp71 = tl.where(tmp57, tmp69, tmp70)
tmp72 = tmp0 >= tmp55
tl.full([1], 4, tl.int64)
tmp75 = tl.load(in_ptr0 + (48 + x1 + 64 * x2), tmp72 & xmask,
eviction_policy='evict_last', other=0.0)
tmp76 = tl.load(in_ptr0 + (16 + x1 + 64 * x2), tmp72 & xmask,
eviction_policy='evict_last', other=0.0)
tmp77 = tmp75 - tmp76
tmp78 = tl.load(in_ptr1 + (48 + x1 + 64 * x2), tmp72 & xmask,
eviction_policy='evict_last', other=0.0)
tmp79 = tl.load(in_ptr1 + (16 + x1 + 64 * x2), tmp72 & xmask,
eviction_policy='evict_last', other=0.0)
tmp80 = tmp78 - tmp79
tmp81 = tmp80 + tmp21
tmp82 = tmp77 / tmp81
tmp83 = tmp77 + tmp21
tmp84 = tmp80 / tmp83
tmp85 = triton_helpers.minimum(tmp82, tmp84)
tmp86 = tmp26 - tmp85
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp72, tmp86, tmp87)
tmp89 = tl.where(tmp57, tmp71, tmp88)
tmp90 = tl.where(tmp33, tmp53, tmp89)
tmp91 = tl.where(tmp4, tmp29, tmp90)
tmp92 = 0.2
tmp93 = tmp91 < tmp92
tmp94 = tmp91 * tmp9
tmp95 = tmp94 * tmp91
tmp96 = 5.0
tmp97 = tmp95 * tmp96
tmp98 = 0.1
tmp99 = tmp91 - tmp98
tmp100 = tl.where(tmp93, tmp97, tmp99)
tl.store(in_out_ptr0 + x3, tmp100, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 64), (64, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_div_lt_mul_stack_sub_where_0[grid(256)](buf1,
arg1_1, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def bounded_iou_loss(pred, target, beta=0.2, eps=0.001):
"""BIoULoss.
This is an implementation of paper
`Improving Object Localization with Fitness NMS and Bounded IoU Loss.
<https://arxiv.org/abs/1711.00164>`_.
Args:
pred (torch.Tensor): Predicted bboxes.
target (torch.Tensor): Target bboxes.
beta (float): beta parameter in smoothl1.
eps (float): eps to avoid NaN.
"""
pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5
pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5
pred_w = pred[:, 2] - pred[:, 0]
pred_h = pred[:, 3] - pred[:, 1]
with torch.no_grad():
target_ctrx = (target[:, 0] + target[:, 2]) * 0.5
target_ctry = (target[:, 1] + target[:, 3]) * 0.5
target_w = target[:, 2] - target[:, 0]
target_h = target[:, 3] - target[:, 1]
dx = target_ctrx - pred_ctrx
dy = target_ctry - pred_ctry
loss_dx = 1 - torch.max((target_w - 2 * dx.abs()) / (target_w + 2 * dx.
abs() + eps), torch.zeros_like(dx))
loss_dy = 1 - torch.max((target_h - 2 * dy.abs()) / (target_h + 2 * dy.
abs() + eps), torch.zeros_like(dy))
loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w / (target_w +
eps))
loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h / (target_h +
eps))
loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh], dim=-1).view(
loss_dx.size(0), -1)
loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta,
loss_comb - 0.5 * beta)
return loss
class BoundedIoULossNew(nn.Module):
def __init__(self, beta=0.2, eps=0.001):
super(BoundedIoULossNew, self).__init__()
self.beta = beta
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| zhangzhengde0225/SwinTrack | BoundedIoULoss | false | 16,813 | [
"MIT"
] | 143 | 526be17f8ef266cb924c6939bd8dda23e9b73249 | https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249 |
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/nj/cnjqzm7hm3u6ggjfvpspnl6pii56jckgwol3ydau4qesjyoteutl.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%permute, %permute_1], 2), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x2 = (xindex // 32)
x1 = (xindex // 8) % 4
x3 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x2) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x2) + (16*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lz/clzc7c4rqtr7ky6jrepxpu2dlmeo4y66gzcis5bqhwixpt7ktopj.py
# Topologically Sorted Source Nodes: [energy], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# energy => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/i5/ci57psuuueutwfqpm57dmpddhnflxjjxpqzf6cwcsnd2zbemfstl.py
# Topologically Sorted Source Nodes: [repeat_1], Original ATen: [aten.repeat]
# Source node to ATen node mapping:
# repeat_1 => repeat_1
# Graph fragment:
# %repeat_1 : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_5, [4, 1]), kwargs = {})
triton_poi_fused_repeat_2 = async_compile.triton('triton_poi_fused_repeat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lt/cltwbpokq7b7gvah2tjf27qlzw6vpmwfuzs3xfk7mhbxym753kvi.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rr/crrmj7r54x5uk325xkhuskxp4m5prz3fpx53yc2st4o5pwbhq32p.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_2, primals_1, buf0, 128, grid=grid(128), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [energy], Original ATen: [aten.tanh]
triton_poi_fused_tanh_1.run(buf2, primals_4, 64, grid=grid(64), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [repeat_1], Original ATen: [aten.repeat]
triton_poi_fused_repeat_2.run(primals_5, buf3, 16, grid=grid(16), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [energy_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf5, buf6, 16, grid=grid(16), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 1, 4), (4, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 4), (4, 0, 1), 0), reinterpret_tensor(primals_1, (4, 4, 4), (4, 16, 1), 0), out=buf7)
del buf6
return (reinterpret_tensor(buf7, (4, 4), (4, 1), 0), reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf2, buf4, reinterpret_tensor(primals_1, (4, 4, 4), (4, 1, 16), 0), reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 4), 0), )
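# Descriptive summary (added comment): call() lowers Attention.forward/score,
# defined later in this record, to: fused cat (with the hidden-state repeat
# folded into the indexing) -> extern mm (attn linear) -> fused bias+tanh ->
# fused repeat of v -> extern bmm (energy) -> two softmax kernels (shift+exp,
# then normalize) -> extern bmm (score-weighted sum of context).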
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from queue import *
from math import *
class Attention(nn.Module):
def __init__(self, hidden_size):
super(Attention, self).__init__()
self.attn = nn.Linear(hidden_size * 2, hidden_size)
self.v = nn.Parameter(torch.randn(hidden_size))
stdv = 1.0 / math.sqrt(self.v.size(0))
self.v.data.uniform_(-stdv, stdv)
def forward(self, hidden, context):
"""
hidden: [batch, hidden_size]
context: [seq, batch, hidden_size]
        returns the context vector for decoding: [batch, hidden_size]
"""
timestep = context.shape[0]
h = hidden.repeat(timestep, 1, 1).transpose(0, 1)
context = context.transpose(0, 1)
attn_energies = self.score(h, context)
score = F.softmax(attn_energies, dim=1).unsqueeze(1)
context = torch.bmm(score, context).squeeze(1)
return context
def score(self, hidden, context):
"""
hidden: [batch, seq, hidden]
context: [batch, seq, hidden]
"""
energy = torch.tanh(self.attn(torch.cat([hidden, context], 2)))
energy = energy.transpose(1, 2)
v = self.v.repeat(context.shape[0], 1).unsqueeze(1)
energy = torch.bmm(v, energy)
return energy.squeeze(1)
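# Hedged shape walk-through (added sketch, not from the source repo): with
# hidden [batch, hidden_size] and context [seq, batch, hidden_size], forward
# returns one attended context vector per batch element.
def _attention_shape_example():
    attn = Attention(hidden_size=4)
    hidden = torch.rand(4, 4)        # [batch=4, hidden_size=4]
    context = torch.rand(4, 4, 4)    # [seq=4, batch=4, hidden_size=4]
    out = attn(hidden, context)
    assert out.shape == (4, 4)       # [batch, hidden_size]
    return out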
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
from queue import *
from math import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x2 = xindex // 32
x1 = xindex // 8 % 4
x3 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x2 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x2 + 16 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_2, primals_1, buf0, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_tanh_1[grid(64)](buf2, primals_4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_repeat_2[grid(16)](primals_5, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0
), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 1, 4), (4, 4, 1), 0)
del buf5
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 4), (4, 0, 1), 0
), reinterpret_tensor(primals_1, (4, 4, 4), (4, 16, 1), 0), out
=buf7)
del buf6
return reinterpret_tensor(buf7, (4, 4), (4, 1), 0), reinterpret_tensor(buf0
, (16, 8), (8, 1), 0), buf2, buf4, reinterpret_tensor(primals_1, (4,
4, 4), (4, 1, 16), 0), reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 4), 0
)
class AttentionNew(nn.Module):
def __init__(self, hidden_size):
super(AttentionNew, self).__init__()
self.attn = nn.Linear(hidden_size * 2, hidden_size)
self.v = nn.Parameter(torch.randn(hidden_size))
stdv = 1.0 / math.sqrt(self.v.size(0))
self.v.data.uniform_(-stdv, stdv)
def score(self, hidden, context):
"""
hidden: [batch, seq, hidden]
context: [batch, seq, hidden]
"""
energy = torch.tanh(self.attn(torch.cat([hidden, context], 2)))
energy = energy.transpose(1, 2)
v = self.v.repeat(context.shape[0], 1).unsqueeze(1)
energy = torch.bmm(v, energy)
return energy.squeeze(1)
def forward(self, input_0, input_1):
primals_4 = self.v
primals_3 = self.attn.weight
primals_5 = self.attn.bias
primals_2 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
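# Hedged usage note (added; requires a CUDA device): call() asserts the exact
# shapes/strides above, so this compiled module only accepts hidden [4, 4] and
# context [4, 4, 4] float32 CUDA tensors, e.g.
#   m = AttentionNew(hidden_size=4).cuda()
#   out = m(torch.rand(4, 4, device='cuda'), torch.rand(4, 4, 4, device='cuda'))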
| zhongerqiandan/OpenDialog | Attention | false | 16,814 | ["MIT"] | 98 | f478b2a912c8c742da5ced510ac40da59217ddb3 | https://github.com/zhongerqiandan/OpenDialog/tree/f478b2a912c8c742da5ced510ac40da59217ddb3 |
segmentation_layer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/qt/cqtywk2a4wy7frtx4av5g7bvv4kxfvxaxuv64y65j6ebodrpqxvc.py
# Topologically Sorted Source Nodes: [segm], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# segm => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 32, 64, 64), (131072, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [segm], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [segm], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 65536, grid=grid(65536), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, )
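# Descriptive note (added comment): the 1x1 convolution itself goes through
# extern_kernels.convolution; the only Triton kernel here is the in-place
# bias add fused over the conv output.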
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 32, 64, 64), (131072, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class segmentation_layer(nn.Module):
def __init__(self, args):
super(segmentation_layer, self).__init__()
self.segm_layer = nn.Conv2d(32, args.snumclass, kernel_size=1)
def forward(self, featMap):
segm = self.segm_layer(featMap)
return segm
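# Hedged shape note (added sketch; _mock_config is assumed to behave like a
# simple attribute namespace, as in the paritybench helper imported above):
# the 1x1 conv maps 32 feature channels to snumclass per-pixel logits without
# changing the spatial size.
def _segmentation_layer_example():
    layer = segmentation_layer(_mock_config(snumclass=4))
    feat = torch.rand(1, 32, 64, 64)
    return layer(feat)  # [1, 4, 64, 64]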
def get_inputs():
return [torch.rand([4, 32, 64, 64])]
def get_init_inputs():
return [[], {'args': _mock_config(snumclass=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 32, 64, 64), (131072, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(65536)](buf1, primals_2, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class segmentation_layerNew(nn.Module):
def __init__(self, args):
super(segmentation_layerNew, self).__init__()
self.segm_layer = nn.Conv2d(32, args.snumclass, kernel_size=1)
def forward(self, input_0):
primals_1 = self.segm_layer.weight
primals_2 = self.segm_layer.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| zhenpeiyang/RelativePose | segmentation_layer | false | 16,815 | ["BSD-3-Clause"] | 144 | 2e9fdf5003c5952cf610f8c6d891519b9e9e014b | https://github.com/zhenpeiyang/RelativePose/tree/2e9fdf5003c5952cf610f8c6d891519b9e9e014b |
MyUpsample2 | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/5a/c5a7xyzg52tfjoddmloxit4l27nteeg5esqmrvjfhym4rycl4xuk.py
# Topologically Sorted Source Nodes: [reshape], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# reshape => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 2) % 4
x3 = (xindex // 16)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + (4*x3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2, 4, 2), (256, 64, 16, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [reshape], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 1024, grid=grid(1024), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (4, 4, 8, 8), (256, 64, 8, 1), 0), )
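# Descriptive note (added comment): the expand is materialized by one copy
# kernel into a (4, 4, 4, 2, 4, 2) buffer, and the final reshape to
# (4, 4, 8, 8) is a zero-copy reinterpret_tensor.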
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
class MyUpsample2(nn.Module):
def forward(self, x):
return x[:, :, :, None, :, None].expand(-1, -1, -1, 2, -1, 2).reshape(x
.size(0), x.size(1), x.size(2) * 2, x.size(3) * 2)
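# Hedged equivalence check (added sketch, an assumption not asserted by the
# source repo): the expand/reshape trick should match 2x nearest-neighbor
# upsampling exactly, since each input pixel is copied into a 2x2 block.
def _my_upsample2_example():
    x = torch.rand(1, 2, 3, 3)
    up = MyUpsample2()(x)  # [1, 2, 6, 6]
    ref = torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')
    assert torch.equal(up, ref)
    return up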
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2 % 4
x3 = xindex // 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 4 * x3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + x4, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2, 4, 2), (256, 64, 16, 8, 2, 1
), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4, 8, 8), (256, 64, 8, 1), 0),
class MyUpsample2New(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| zigonk/ReSC | MyUpsample2 | false | 16,816 | ["MIT"] | 57 | c816365b0410f521974060ef0cc6eaa1dd09b63a | https://github.com/zigonk/ReSC/tree/c816365b0410f521974060ef0cc6eaa1dd09b63a |
BCEFocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/b7/cb75qt7z64mnaffeamqnuaaplvueju2cgxw5qekdgfdk2csy6ief.py
# Topologically Sorted Source Nodes: [pt, sub, pow_1, mul, mul_1, log, mul_2, pow_2, mul_3, sub_1, mul_4, sub_2, log_1, mul_5, loss, loss_1, mul_6, truediv], Original ATen: [aten.sigmoid, aten.rsub, aten.pow, aten.mul, aten.log, aten.sub, aten.sum, aten.div]
# Source node to ATen node mapping:
# log => log
# log_1 => log_1
# loss => sub_3
# loss_1 => sum_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# mul_6 => mul_6
# pow_1 => pow_1
# pow_2 => pow_2
# pt => sigmoid
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# truediv => div
# Graph fragment:
# %sigmoid : [num_users=4] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, -0.25), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %arg1_1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sigmoid,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %log), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sigmoid, 2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, 0.75), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %sub_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sub_2,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %log_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %mul_5), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_3,), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 1.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_6, 54), kwargs = {})
triton_per_fused_div_log_mul_pow_rsub_sigmoid_sub_sum_0 = async_compile.triton('triton_per_fused_div_log_mul_pow_rsub_sigmoid_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_log_mul_pow_rsub_sigmoid_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_log_mul_pow_rsub_sigmoid_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp7 = tl.load(in_ptr1 + (r0), None)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = tmp3 * tmp3
tmp5 = -0.25
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp9 = tl_math.log(tmp1)
tmp10 = tmp8 * tmp9
tmp11 = tmp1 * tmp1
tmp12 = 0.75
tmp13 = tmp11 * tmp12
tmp14 = tmp2 - tmp7
tmp15 = tmp13 * tmp14
tmp16 = tl_math.log(tmp3)
tmp17 = tmp15 * tmp16
tmp18 = tmp10 - tmp17
tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = tmp21 * tmp2
tmp23 = 0.018518518518518517
tmp24 = tmp22 * tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp24, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [pt, sub, pow_1, mul, mul_1, log, mul_2, pow_2, mul_3, sub_1, mul_4, sub_2, log_1, mul_5, loss, loss_1, mul_6, truediv], Original ATen: [aten.sigmoid, aten.rsub, aten.pow, aten.mul, aten.log, aten.sub, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_div_log_mul_pow_rsub_sigmoid_sub_sum_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
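# Descriptive note (added comment): the whole focal-loss expression, the sum
# reduction, and the final * loss_weight / 54 scaling (0.018518... = 1/54) are
# fused into a single persistent-reduction kernel over all 256 elements.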
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class BCEFocalLoss(torch.nn.Module):
"""
    Binary-classification focal loss with a fixed alpha.
"""
def __init__(self, gamma=2, alpha=0.25, reduction='sum', loss_weight=1.0):
super().__init__()
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, _input, target):
pt = torch.sigmoid(_input)
alpha = self.alpha
loss = -alpha * (1 - pt) ** self.gamma * target * torch.log(pt) - (
1 - alpha) * pt ** self.gamma * (1 - target) * torch.log(1 - pt)
if self.reduction == 'elementwise_mean':
loss = torch.mean(loss)
elif self.reduction == 'sum':
loss = torch.sum(loss)
return loss * self.loss_weight / 54
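# Hedged numeric check (added sketch with illustrative values): at logit 0,
# sigmoid(0) = 0.5, so each positive element contributes
# alpha * (1 - 0.5)**2 * log(2) = 0.25 * 0.25 * log(2) before the /54 scaling.
def _bce_focal_loss_example():
    criterion = BCEFocalLoss(gamma=2, alpha=0.25, reduction='sum')
    logits = torch.zeros(2, 3)
    targets = torch.ones(2, 3)  # all positives
    return criterion(logits, targets)  # 6 * 0.25 * 0.25 * log(2) / 54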
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_log_mul_pow_rsub_sigmoid_sub_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp7 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = tmp3 * tmp3
tmp5 = -0.25
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp9 = tl_math.log(tmp1)
tmp10 = tmp8 * tmp9
tmp11 = tmp1 * tmp1
tmp12 = 0.75
tmp13 = tmp11 * tmp12
tmp14 = tmp2 - tmp7
tmp15 = tmp13 * tmp14
tmp16 = tl_math.log(tmp3)
tmp17 = tmp15 * tmp16
tmp18 = tmp10 - tmp17
tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = tmp21 * tmp2
tmp23 = 0.018518518518518517
tmp24 = tmp22 * tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp24, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_log_mul_pow_rsub_sigmoid_sub_sum_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class BCEFocalLossNew(torch.nn.Module):
"""
    Binary-classification focal loss with a fixed alpha.
"""
def __init__(self, gamma=2, alpha=0.25, reduction='sum', loss_weight=1.0):
super().__init__()
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| zhiqi-li/Panoptic-SegFormer | BCEFocalLoss | false | 16,817 | ["Apache-2.0"] | 97 | cdb9b68059e9ef825a3f7079c37aa835b1711227 | https://github.com/zhiqi-li/Panoptic-SegFormer/tree/cdb9b68059e9ef825a3f7079c37aa835b1711227 |
LAM_Gconv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/5b/c5bzfn7bhkjj4wyjmog7qmb337j5qyjg6xopozmn7jp72xkf4mgv.py
# Topologically Sorted Source Nodes: [A_hat, mul, L], Original ATen: [aten.repeat, aten.mul]
# Source node to ATen node mapping:
# A_hat => repeat
# L => mul_1
# mul => mul
# Graph fragment:
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%unsqueeze, [4, 1, 1]), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %repeat), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %view_1), kwargs = {})
triton_poi_fused_mul_repeat_0 = async_compile.triton('triton_poi_fused_mul_repeat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_repeat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x3 = xindex % 16
x0 = xindex % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 + x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (12 + x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = -0.5
tmp10 = libdevice.pow(tmp8, tmp9)
tmp12 = tmp10 * tmp11
tmp15 = tmp13 + tmp14
tmp17 = tmp15 + tmp16
tmp19 = tmp17 + tmp18
tmp20 = tmp19 + tmp7
tmp21 = libdevice.pow(tmp20, tmp9)
tmp22 = tmp12 * tmp21
tl.store(out_ptr0 + (x5), tmp22, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ey/cey6dsgmzj2byupf73e6nwt5fetf5ne2sa57kzcmy7ejvaqhqb72.py
# Topologically Sorted Source Nodes: [X_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# X_1 => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_10, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
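    # tmp4 is relu(x + bias); tmp6 records where the activation is clamped to
    # zero, the boolean mask aten.threshold_backward reuses in the backward pass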
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [A_hat, mul, L], Original ATen: [aten.repeat, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_repeat_0.run(primals_2, buf0, 64, grid=grid(64), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(buf0, primals_1, out=buf1)
del primals_1
buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2)
del primals_3
buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0); del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [X_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_4, buf4, 64, grid=grid(64), stream=stream0)
del primals_4
return (buf3, reinterpret_tensor(buf1, (16, 4), (4, 1), 0), buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LAM_Gconv(nn.Module):
def __init__(self, in_features, out_features, activation=nn.ReLU(
inplace=True)):
super(LAM_Gconv, self).__init__()
self.fc = nn.Linear(in_features=in_features, out_features=out_features)
self.activation = activation
def laplacian(self, A_hat):
D_hat = (torch.sum(A_hat, 0) + 1e-05) ** -0.5
L = D_hat * A_hat * D_hat
return L
def laplacian_batch(self, A_hat):
batch, N = A_hat.shape[:2]
D_hat = (torch.sum(A_hat, 1) + 1e-05) ** -0.5
L = D_hat.view(batch, N, 1) * A_hat * D_hat.view(batch, 1, N)
return L
def forward(self, X, A):
batch = X.size(0)
A_hat = A.unsqueeze(0).repeat(batch, 1, 1)
X = self.fc(torch.bmm(self.laplacian_batch(A_hat), X))
if self.activation is not None:
X = self.activation(X)
return X
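# A minimal sanity check for laplacian_batch (illustrative only; the sizes and
# the torch.diag_embed reference below are assumptions, not part of the module):
#
#   A_hat = torch.rand(2, 4, 4)
#   d = (A_hat.sum(1) + 1e-05) ** -0.5
#   ref = torch.diag_embed(d) @ A_hat @ torch.diag_embed(d)
#   assert torch.allclose(LAM_Gconv(4, 4).laplacian_batch(A_hat), ref)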
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x3 = xindex % 16
x0 = xindex % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 + x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (12 + x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = -0.5
tmp10 = libdevice.pow(tmp8, tmp9)
tmp12 = tmp10 * tmp11
tmp15 = tmp13 + tmp14
tmp17 = tmp15 + tmp16
tmp19 = tmp17 + tmp18
tmp20 = tmp19 + tmp7
tmp21 = libdevice.pow(tmp20, tmp9)
tmp22 = tmp12 * tmp21
tl.store(out_ptr0 + x5, tmp22, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
        triton_poi_fused_mul_repeat_0[grid(64)](primals_2, buf0, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf0, primals_1, out=buf1)
del primals_1
buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
del buf0
extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2)
del primals_3
buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0)
del buf2
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf3,
primals_4, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_4
return buf3, reinterpret_tensor(buf1, (16, 4), (4, 1), 0), buf4
class LAM_GconvNew(nn.Module):
def __init__(self, in_features, out_features, activation=nn.ReLU(
inplace=True)):
super(LAM_GconvNew, self).__init__()
self.fc = nn.Linear(in_features=in_features, out_features=out_features)
self.activation = activation
def laplacian(self, A_hat):
D_hat = (torch.sum(A_hat, 0) + 1e-05) ** -0.5
L = D_hat * A_hat * D_hat
return L
def laplacian_batch(self, A_hat):
batch, N = A_hat.shape[:2]
D_hat = (torch.sum(A_hat, 1) + 1e-05) ** -0.5
L = D_hat.view(batch, N, 1) * A_hat * D_hat.view(batch, 1, N)
return L
def forward(self, input_0, input_1):
primals_2 = self.fc.weight
primals_4 = self.fc.bias
primals_1 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
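# call() returns the forward result plus tensors saved for autograd (the bmm
# output reused as the linear layer's input, and the boolean relu mask);
# forward keeps only output[0], matching LAM_Gconv.forward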
| zhaoweixi/GraFormer | LAM_Gconv | false | 16818 | ["BSD-2-Clause"] | 384 | 0a0a04014cdf157c11ab8e952862efa27c6a1980 | https://github.com/zhaoweixi/GraFormer/tree/0a0a04014cdf157c11ab8e952862efa27c6a1980 |
IRHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xe/cxeozkscph2oahfwv2cautkmh3j7bmrjgwn4ez6peblrr7dhjnjg.py
# Topologically Sorted Source Nodes: [inpt], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# inpt => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%squeeze_1, %squeeze, %squeeze_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 36
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 9
x1 = (xindex // 9)
x2 = xindex
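    # x0 walks the 9-wide concatenated feature dim of inpt = cat([src_hidden,
    # score, tgt_hidden], 1): ranges [0, 4), {4}, and [5, 9) respectively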
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 9, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-5) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2k/c2kiox2wvshockbbzjlycxwhjeigavlrfwuvcpbcbxpipbm7d7k6.py
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# tanh => tanh
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 9), (9, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (2, 4), (4, 1))
assert_size_stride(primals_7, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, primals_3, out=buf0)
del primals_3
buf1 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0), reinterpret_tensor(primals_2, (4, 4, 1), (4, 1, 1), 0), out=buf1)
buf2 = empty_strided_cuda((4, 9), (9, 1), torch.float32)
# Topologically Sorted Source Nodes: [inpt], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf1, primals_2, buf2, 36, grid=grid(36), stream=stream0)
del buf1
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (9, 4), (1, 9), 0), out=buf3)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
triton_poi_fused_tanh_1.run(buf4, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [score_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_7
return (buf5, buf2, buf4, primals_6, primals_4, reinterpret_tensor(primals_2, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 9), (9, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from queue import *
from math import *
class IRHead(nn.Module):
def __init__(self, hidden_size, dropout=0.5):
super(IRHead, self).__init__()
self.M = nn.Parameter(torch.randn(hidden_size, hidden_size))
self.hidden_layer = nn.Linear(hidden_size * 2 + 1, hidden_size)
self.opt_layer = nn.Linear(hidden_size, 2)
self.hidden_drop = nn.Dropout(p=dropout)
def forward(self, src_embed, tgt_embed):
"""
src_embed: [batch, hidden]
tgt_embed: [batch, hidden]
return the score: [batch, 2]
"""
src_hidden = src_embed.unsqueeze(1)
tgt_hidden = tgt_embed.unsqueeze(2)
score = torch.bmm(torch.matmul(src_hidden, self.M), tgt_hidden
).squeeze(2)
src_hidden = src_hidden.squeeze(1)
tgt_hidden = tgt_hidden.squeeze(2)
inpt = torch.cat([src_hidden, score, tgt_hidden], 1)
inpt = self.hidden_drop(torch.tanh(self.hidden_layer(inpt)))
score = self.opt_layer(inpt)
return score
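# The unsqueeze/matmul/bmm chain is a per-row bilinear form; an equivalent
# (hypothetical) dense formulation for reference:
#
#   score = ((src_embed @ self.M) * tgt_embed).sum(dim=1, keepdim=True)  # [batch, 1]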
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from queue import *
from math import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 36
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 9
x1 = xindex // 9
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + x1, tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 9, tl.int64)
tmp14 = tl.load(in_ptr2 + (4 * x1 + (-5 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 9), (9, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (2, 4), (4, 1))
assert_size_stride(primals_7, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_3, out=buf0)
del primals_3
buf1 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4, 1), (4, 1, 1), 0), out=buf1)
buf2 = empty_strided_cuda((4, 9), (9, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(36)](primals_1, buf1, primals_2, buf2,
36, XBLOCK=64, num_warps=1, num_stages=1)
del buf1
buf3 = buf0
del buf0
        extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (9, 4), (1, 9), 0),
            out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_tanh_1[grid(16)](buf4, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6,
(4, 2), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_7
    return (buf5, buf2, buf4, primals_6, primals_4,
        reinterpret_tensor(primals_2, (4, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(primals_1, (4, 4), (1, 4), 0))
class IRHeadNew(nn.Module):
def __init__(self, hidden_size, dropout=0.5):
super(IRHeadNew, self).__init__()
self.M = nn.Parameter(torch.randn(hidden_size, hidden_size))
self.hidden_layer = nn.Linear(hidden_size * 2 + 1, hidden_size)
self.opt_layer = nn.Linear(hidden_size, 2)
self.hidden_drop = nn.Dropout(p=dropout)
def forward(self, input_0, input_1):
primals_1 = self.M
primals_4 = self.hidden_layer.weight
primals_5 = self.hidden_layer.bias
primals_6 = self.opt_layer.weight
primals_7 = self.opt_layer.bias
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
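# Note: the compiled graph contains no dropout kernel; self.hidden_drop appears
# to have been traced in eval mode, where nn.Dropout is the identity (an
# observation about this particular trace, not a general guarantee)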
| zhongerqiandan/OpenDialog | IRHead | false | 16819 | ["MIT"] | 98 | f478b2a912c8c742da5ced510ac40da59217ddb3 | https://github.com/zhongerqiandan/OpenDialog/tree/f478b2a912c8c742da5ced510ac40da59217ddb3 |
DenseBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/kd/ckd764hwhglrngbkconrttgyxhxmqe2cvebc3b5wgaygxufgft7x.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %where], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 36
x0 = xindex % 16
x2 = (xindex // 576)
x3 = xindex
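    # x1 indexes the 36-channel concat cat((x, x1), 1): channels [0, 4) copy the
    # input, channels [4, 36) fuse conv1's bias add and LeakyReLU(0.2) inline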
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 36, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (512*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.load(in_ptr2 + ((-4) + x1), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.2
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp6, tmp16, tmp17)
tmp19 = tl.where(tmp4, tmp5, tmp18)
tl.store(out_ptr0 + (x3), tmp19, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/zf/czfpxtgyyptiwxavyzsauitefkwwe2pgvdzfymmqus5nqxorkewl.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %where, %where_1], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4352
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 68
x0 = xindex % 16
x2 = (xindex // 1088)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 36, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (512*x2)), tmp9 & xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + ((-4) + x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 68, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tl.load(in_ptr3 + (x0 + (16*((-36) + x1)) + (512*x2)), tmp20 & xmask, other=0.0)
tmp24 = tl.load(in_ptr4 + ((-36) + x1), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tmp25 > tmp13
tmp27 = tmp25 * tmp15
tmp28 = tl.where(tmp26, tmp25, tmp27)
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp20, tmp28, tmp29)
tmp31 = tl.where(tmp9, tmp19, tmp30)
tmp32 = tl.where(tmp4, tmp5, tmp31)
tl.store(out_ptr0 + (x3), tmp32, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/yi/cyiz2vv363vx3iycoog6h2haml7zjoymg2a7mnhznpuupbpv3vjj.py
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %where, %where_1, %where_2], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 100
x0 = xindex % 16
x2 = (xindex // 1600)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 36, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (512*x2)), tmp9 & xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + ((-4) + x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 68, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tmp20 & tmp22
tmp24 = tl.load(in_ptr3 + (x0 + (16*((-36) + x1)) + (512*x2)), tmp23 & xmask, other=0.0)
tmp25 = tl.load(in_ptr4 + ((-36) + x1), tmp23 & xmask, eviction_policy='evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 > tmp13
tmp28 = tmp26 * tmp15
tmp29 = tl.where(tmp27, tmp26, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp23, tmp29, tmp30)
tmp32 = tmp0 >= tmp21
tmp33 = tl.full([1], 100, tl.int64)
tmp34 = tmp0 < tmp33
tmp35 = tl.load(in_ptr5 + (x0 + (16*((-68) + x1)) + (512*x2)), tmp32 & xmask, other=0.0)
tmp36 = tl.load(in_ptr6 + ((-68) + x1), tmp32 & xmask, eviction_policy='evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = tmp37 > tmp13
tmp39 = tmp37 * tmp15
tmp40 = tl.where(tmp38, tmp37, tmp39)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp32, tmp40, tmp41)
tmp43 = tl.where(tmp23, tmp31, tmp42)
tmp44 = tl.where(tmp9, tmp19, tmp43)
tmp45 = tl.where(tmp4, tmp5, tmp44)
tl.store(out_ptr0 + (x3), tmp45, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wu/cwuy254w5swharfwtpch3jd3yxcztwqq6y5meuqsk6hsehrctb3v.py
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_3 => cat_3
# Graph fragment:
# %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %where, %where_1, %where_2, %where_3], 1), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8448
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 132
x0 = xindex % 16
x2 = (xindex // 2112)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 36, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (512*x2)), tmp9 & xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + ((-4) + x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 68, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tmp20 & tmp22
tmp24 = tl.load(in_ptr3 + (x0 + (16*((-36) + x1)) + (512*x2)), tmp23 & xmask, other=0.0)
tmp25 = tl.load(in_ptr4 + ((-36) + x1), tmp23 & xmask, eviction_policy='evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 > tmp13
tmp28 = tmp26 * tmp15
tmp29 = tl.where(tmp27, tmp26, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp23, tmp29, tmp30)
tmp32 = tmp0 >= tmp21
tmp33 = tl.full([1], 100, tl.int64)
tmp34 = tmp0 < tmp33
tmp35 = tmp32 & tmp34
tmp36 = tl.load(in_ptr5 + (x0 + (16*((-68) + x1)) + (512*x2)), tmp35 & xmask, other=0.0)
tmp37 = tl.load(in_ptr6 + ((-68) + x1), tmp35 & xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp38 > tmp13
tmp40 = tmp38 * tmp15
tmp41 = tl.where(tmp39, tmp38, tmp40)
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp35, tmp41, tmp42)
tmp44 = tmp0 >= tmp33
tmp45 = tl.full([1], 132, tl.int64)
tmp46 = tmp0 < tmp45
tmp47 = tl.load(in_ptr7 + (x0 + (16*((-100) + x1)) + (512*x2)), tmp44 & xmask, other=0.0)
tmp48 = tl.load(in_ptr8 + ((-100) + x1), tmp44 & xmask, eviction_policy='evict_last', other=0.0)
tmp49 = tmp47 + tmp48
tmp50 = tmp49 > tmp13
tmp51 = tmp49 * tmp15
tmp52 = tl.where(tmp50, tmp49, tmp51)
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp44, tmp52, tmp53)
tmp55 = tl.where(tmp35, tmp43, tmp54)
tmp56 = tl.where(tmp23, tmp31, tmp55)
tmp57 = tl.where(tmp9, tmp19, tmp56)
tmp58 = tl.where(tmp4, tmp5, tmp57)
tl.store(out_ptr0 + (x3), tmp58, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vs/cvsrdu7meh3cqqfd24cmr3qxqgwp6bjx6xewaycwqngiexspii2c.py
# Topologically Sorted Source Nodes: [x5], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x5 => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_3, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5g/c5gx2srplrmxtdiu62kehg6sc7xsfdu6ogiccmo2q6bnni7idadq.py
# Topologically Sorted Source Nodes: [conv2d_3, x4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# x4 => gt_3, mul_3, where_3
# Graph fragment:
# %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.2), kwargs = {})
# %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {})
# %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_3, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
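    # tmp8 marks where the LeakyReLU output is positive; it is the boolean mask
    # consumed by aten.leaky_relu_backward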
tl.store(out_ptr0 + (x3), tmp8, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 36, 3, 3), (324, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (32, 68, 3, 3), (612, 9, 3, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (32, 100, 3, 3), (900, 9, 3, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (4, 132, 3, 3), (1188, 9, 3, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 4, 4), (512, 16, 4, 1))
buf1 = empty_strided_cuda((4, 36, 4, 4), (576, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_3, buf0, primals_2, buf1, 2304, grid=grid(2304), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 4, 4), (512, 16, 4, 1))
buf3 = empty_strided_cuda((4, 68, 4, 4), (1088, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(primals_3, buf0, primals_2, buf2, primals_5, buf3, 4352, grid=grid(4352), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 4, 4), (512, 16, 4, 1))
buf5 = empty_strided_cuda((4, 100, 4, 4), (1600, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(primals_3, buf0, primals_2, buf2, primals_5, buf4, primals_7, buf5, 6400, grid=grid(6400), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 4, 4), (512, 16, 4, 1))
buf7 = empty_strided_cuda((4, 132, 4, 4), (2112, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(primals_3, buf0, primals_2, buf2, primals_5, buf4, primals_7, buf6, primals_9, buf7, 8448, grid=grid(8448), stream=stream0)
# Topologically Sorted Source Nodes: [x5], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x5], Original ATen: [aten.convolution]
triton_poi_fused_convolution_4.run(buf9, primals_11, 256, grid=grid(256), stream=stream0)
del primals_11
buf10 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_3, x4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5.run(buf6, primals_9, buf10, 2048, grid=grid(2048), stream=stream0)
del buf6
del primals_9
buf11 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_2, x3], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5.run(buf4, primals_7, buf11, 2048, grid=grid(2048), stream=stream0)
del buf4
del primals_7
buf12 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, x2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5.run(buf2, primals_5, buf12, 2048, grid=grid(2048), stream=stream0)
del buf2
del primals_5
buf13 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d, x1], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5.run(buf0, primals_2, buf13, 2048, grid=grid(2048), stream=stream0)
del buf0
del primals_2
return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7, buf10, buf11, buf12, buf13, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 36, 3, 3), (324, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 68, 3, 3), (612, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, 100, 3, 3), (900, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 132, 3, 3), (1188, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.init as init
def initialize_weights(net_l, scale=1):
if not isinstance(net_l, list):
net_l = [net_l]
for net in net_l:
for m in net.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias.data, 0.0)
def initialize_weights_xavier(net_l, scale=1):
if not isinstance(net_l, list):
net_l = [net_l]
for net in net_l:
for m in net.modules():
if isinstance(m, nn.Conv2d):
init.xavier_normal_(m.weight)
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
init.xavier_normal_(m.weight)
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias.data, 0.0)
class DenseBlock(nn.Module):
def __init__(self, channel_in, channel_out, init='xavier', gc=32, bias=True
):
super(DenseBlock, self).__init__()
self.conv1 = nn.Conv2d(channel_in, gc, 3, 1, 1, bias=bias)
self.conv2 = nn.Conv2d(channel_in + gc, gc, 3, 1, 1, bias=bias)
self.conv3 = nn.Conv2d(channel_in + 2 * gc, gc, 3, 1, 1, bias=bias)
self.conv4 = nn.Conv2d(channel_in + 3 * gc, gc, 3, 1, 1, bias=bias)
self.conv5 = nn.Conv2d(channel_in + 4 * gc, channel_out, 3, 1, 1,
bias=bias)
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
if init == 'xavier':
initialize_weights_xavier([self.conv1, self.conv2, self.conv3,
self.conv4], 0.1)
else:
initialize_weights([self.conv1, self.conv2, self.conv3, self.
conv4], 0.1)
initialize_weights(self.conv5, 0)
def forward(self, x):
x1 = self.lrelu(self.conv1(x))
x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))
x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))
x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))
x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
return x5
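# Channel bookkeeping for the concatenations above (with channel_in=4, gc=32 as
# in the compiled kernels): conv2 sees 4+32=36 channels, conv3 4+2*32=68,
# conv4 4+3*32=100, conv5 4+4*32=132; the kernel sizes 2304/4352/6400/8448
# are batch*C*H*W = 4*C*16 for C in (36, 68, 100, 132)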
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel_in': 4, 'channel_out': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 36
x0 = xindex % 16
x2 = xindex // 576
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 36, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 512 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-4 + x1), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.2
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp6, tmp16, tmp17)
tmp19 = tl.where(tmp4, tmp5, tmp18)
tl.store(out_ptr0 + x3, tmp19, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4352
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 68
x0 = xindex % 16
x2 = xindex // 1088
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 36, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 512 * x2), tmp9 &
xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + (-4 + x1), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tl.full([1], 68, tl.int64)
tmp23 = tl.load(in_ptr3 + (x0 + 16 * (-36 + x1) + 512 * x2), tmp20 &
xmask, other=0.0)
tmp24 = tl.load(in_ptr4 + (-36 + x1), tmp20 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tmp25 > tmp13
tmp27 = tmp25 * tmp15
tmp28 = tl.where(tmp26, tmp25, tmp27)
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp20, tmp28, tmp29)
tmp31 = tl.where(tmp9, tmp19, tmp30)
tmp32 = tl.where(tmp4, tmp5, tmp31)
tl.store(out_ptr0 + x3, tmp32, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 100
x0 = xindex % 16
x2 = xindex // 1600
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 36, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 512 * x2), tmp9 &
xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + (-4 + x1), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 68, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tmp20 & tmp22
tmp24 = tl.load(in_ptr3 + (x0 + 16 * (-36 + x1) + 512 * x2), tmp23 &
xmask, other=0.0)
tmp25 = tl.load(in_ptr4 + (-36 + x1), tmp23 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 > tmp13
tmp28 = tmp26 * tmp15
tmp29 = tl.where(tmp27, tmp26, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp23, tmp29, tmp30)
tmp32 = tmp0 >= tmp21
tl.full([1], 100, tl.int64)
tmp35 = tl.load(in_ptr5 + (x0 + 16 * (-68 + x1) + 512 * x2), tmp32 &
xmask, other=0.0)
tmp36 = tl.load(in_ptr6 + (-68 + x1), tmp32 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = tmp37 > tmp13
tmp39 = tmp37 * tmp15
tmp40 = tl.where(tmp38, tmp37, tmp39)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp32, tmp40, tmp41)
tmp43 = tl.where(tmp23, tmp31, tmp42)
tmp44 = tl.where(tmp9, tmp19, tmp43)
tmp45 = tl.where(tmp4, tmp5, tmp44)
tl.store(out_ptr0 + x3, tmp45, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 8448
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 132
x0 = xindex % 16
x2 = xindex // 2112
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 36, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 512 * x2), tmp9 &
xmask, other=0.0)
tmp11 = tl.load(in_ptr2 + (-4 + x1), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 68, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tmp20 & tmp22
tmp24 = tl.load(in_ptr3 + (x0 + 16 * (-36 + x1) + 512 * x2), tmp23 &
xmask, other=0.0)
tmp25 = tl.load(in_ptr4 + (-36 + x1), tmp23 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 > tmp13
tmp28 = tmp26 * tmp15
tmp29 = tl.where(tmp27, tmp26, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp23, tmp29, tmp30)
tmp32 = tmp0 >= tmp21
tmp33 = tl.full([1], 100, tl.int64)
tmp34 = tmp0 < tmp33
tmp35 = tmp32 & tmp34
tmp36 = tl.load(in_ptr5 + (x0 + 16 * (-68 + x1) + 512 * x2), tmp35 &
xmask, other=0.0)
tmp37 = tl.load(in_ptr6 + (-68 + x1), tmp35 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tmp38 > tmp13
tmp40 = tmp38 * tmp15
tmp41 = tl.where(tmp39, tmp38, tmp40)
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp35, tmp41, tmp42)
tmp44 = tmp0 >= tmp33
tl.full([1], 132, tl.int64)
tmp47 = tl.load(in_ptr7 + (x0 + 16 * (-100 + x1) + 512 * x2), tmp44 &
xmask, other=0.0)
tmp48 = tl.load(in_ptr8 + (-100 + x1), tmp44 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp49 = tmp47 + tmp48
tmp50 = tmp49 > tmp13
tmp51 = tmp49 * tmp15
tmp52 = tl.where(tmp50, tmp49, tmp51)
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp44, tmp52, tmp53)
tmp55 = tl.where(tmp35, tmp43, tmp54)
tmp56 = tl.where(tmp23, tmp31, tmp55)
tmp57 = tl.where(tmp9, tmp19, tmp56)
tmp58 = tl.where(tmp4, tmp5, tmp57)
tl.store(out_ptr0 + x3, tmp58, xmask)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + x3, tmp8, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 36, 3, 3), (324, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (32, 68, 3, 3), (612, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (32, 100, 3, 3), (900, 9, 3, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (4, 132, 3, 3), (1188, 9, 3, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 4, 4), (512, 16, 4, 1))
buf1 = empty_strided_cuda((4, 36, 4, 4), (576, 16, 4, 1), torch.float32
)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(2304)](primals_3, buf0, primals_2, buf1,
2304, XBLOCK=256, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 4, 4), (512, 16, 4, 1))
buf3 = empty_strided_cuda((4, 68, 4, 4), (1088, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_1[grid(4352)](primals_3, buf0, primals_2, buf2,
primals_5, buf3, 4352, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 4, 4), (512, 16, 4, 1))
buf5 = empty_strided_cuda((4, 100, 4, 4), (1600, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_2[grid(6400)](primals_3, buf0, primals_2, buf2,
primals_5, buf4, primals_7, buf5, 6400, XBLOCK=256, num_warps=4,
num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 4, 4), (512, 16, 4, 1))
buf7 = empty_strided_cuda((4, 132, 4, 4), (2112, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_3[grid(8448)](primals_3, buf0, primals_2, buf2,
primals_5, buf4, primals_7, buf6, primals_9, buf7, 8448, XBLOCK
=128, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_4[grid(256)](buf9, primals_11, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
buf10 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid(
2048)](buf6, primals_9, buf10, 2048, XBLOCK=128, num_warps=4,
num_stages=1)
del buf6
del primals_9
buf11 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid(
2048)](buf4, primals_7, buf11, 2048, XBLOCK=128, num_warps=4,
num_stages=1)
del buf4
del primals_7
buf12 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid(
2048)](buf2, primals_5, buf12, 2048, XBLOCK=128, num_warps=4,
num_stages=1)
del buf2
del primals_5
buf13 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid(
2048)](buf0, primals_2, buf13, 2048, XBLOCK=128, num_warps=4,
num_stages=1)
del buf0
del primals_2
return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, buf1, buf3, buf5, buf7, buf10, buf11, buf12, buf13)
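# Editorial note (sketch): the boolean buffers buf10-buf13 returned above are
# the `leaky(x) > 0` masks (equivalently `x > 0`) computed by kernel 5 for
# each LeakyReLU output; Inductor saves them so the backward pass can pick
# the slope (1 vs. 0.2) without recomputing the convolutions.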
def initialize_weights(net_l, scale=1):
if not isinstance(net_l, list):
net_l = [net_l]
for net in net_l:
for m in net.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias.data, 0.0)
def initialize_weights_xavier(net_l, scale=1):
if not isinstance(net_l, list):
net_l = [net_l]
for net in net_l:
for m in net.modules():
if isinstance(m, nn.Conv2d):
init.xavier_normal_(m.weight)
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
init.xavier_normal_(m.weight)
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias.data, 0.0)
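# Editorial note (sketch): DenseBlockNew below mirrors the reference module's
# init recipe -- conv1-conv4 get Kaiming/Xavier weights scaled by 0.1, while
# initialize_weights(self.conv5, 0) zeroes conv5's weights and bias, so the
# block's initial output is zero. That is a common choice when the block
# feeds a residual or coupling connection.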
class DenseBlockNew(nn.Module):
def __init__(self, channel_in, channel_out, init='xavier', gc=32, bias=True
):
super(DenseBlockNew, self).__init__()
self.conv1 = nn.Conv2d(channel_in, gc, 3, 1, 1, bias=bias)
self.conv2 = nn.Conv2d(channel_in + gc, gc, 3, 1, 1, bias=bias)
self.conv3 = nn.Conv2d(channel_in + 2 * gc, gc, 3, 1, 1, bias=bias)
self.conv4 = nn.Conv2d(channel_in + 3 * gc, gc, 3, 1, 1, bias=bias)
self.conv5 = nn.Conv2d(channel_in + 4 * gc, channel_out, 3, 1, 1,
bias=bias)
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
if init == 'xavier':
initialize_weights_xavier([self.conv1, self.conv2, self.conv3,
self.conv4], 0.1)
else:
initialize_weights([self.conv1, self.conv2, self.conv3, self.
conv4], 0.1)
initialize_weights(self.conv5, 0)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.conv5.weight
primals_11 = self.conv5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
 | yzxing87/Invertible-ISP | DenseBlock | false | 16,820 | ["MIT"] | 246 | 344dd333dd2a075f6a9e4ffc445dc387ca3014c4 | https://github.com/yzxing87/Invertible-ISP/tree/344dd333dd2a075f6a9e4ffc445dc387ca3014c4 |
LSTM | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# combined => cat
# Graph fragment:
# %cat : [num_users=5] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
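# Editorial sketch (not compiler output): the fused kernel above implements
# the concatenation from the source module elementwise --
#     out[x1, x0] = input[x1, x0]       if x0 < 4
#                 = hidden[x1, x0 - 4]  otherwise
def _cat_reference(inp, hidden):
    # reference semantics of triton_poi_fused_cat_0 for the (4, 4) operands
    return torch.cat((inp, hidden), dim=1)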
# kernel path: runs/run_shard_0/inductor_cache/y4/cy4wemxyp7xojkfvwo2wnyjxjj5pa3puyxccu2igp5husj2j24jq.py
# Topologically Sorted Source Nodes: [f, i, o, C, mul, mul_1, Cell_State, tanh_1, Hidden_State], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add]
# Source node to ATen node mapping:
# C => tanh
# Cell_State => add
# Hidden_State => mul_2
# f => sigmoid
# i => sigmoid_1
# mul => mul
# mul_1 => mul_1
# o => sigmoid_2
# tanh_1 => tanh_1
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm,), kwargs = {})
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm_1,), kwargs = {})
# %sigmoid_2 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm_2,), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%addmm_3,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_11), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %tanh), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_2, %tanh_1), kwargs = {})
triton_poi_fused_add_mul_sigmoid_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask)
tmp6 = tl.load(in_ptr3 + (x0), xmask)
tmp10 = tl.load(in_ptr4 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp5 = tl.sigmoid(tmp4)
tmp7 = libdevice.tanh(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tmp3 + tmp8
tmp11 = tl.sigmoid(tmp10)
tmp12 = libdevice.tanh(tmp9)
tmp13 = tmp11 * tmp12
tl.store(out_ptr0 + (x0), tmp9, xmask)
tl.store(out_ptr1 + (x0), tmp13, xmask)
''', device_str='cuda')
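# Editorial note (sketch): the kernel above fuses the whole LSTM cell update
# for the 16 (4 x 4) elements in one pass:
#     Cell_State   = sigmoid(f_pre) * Cell_State_prev + sigmoid(i_pre) * tanh(C_pre)
#     Hidden_State = sigmoid(o_pre) * tanh(Cell_State)
# where f_pre, i_pre, o_pre, C_pre are the four addmm outputs gathered below.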
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 8), (8, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 8), (8, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, buf0, reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf0, reinterpret_tensor(primals_7, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf3)
del primals_7
del primals_8
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, buf0, reinterpret_tensor(primals_9, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf4)
del primals_10
del primals_9
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [f, i, o, C, mul, mul_1, Cell_State, tanh_1, Hidden_State], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add]
triton_poi_fused_add_mul_sigmoid_tanh_1.run(buf1, primals_11, buf2, buf4, buf3, buf5, buf6, 16, grid=grid(16), stream=stream0)
return (buf6, buf5, primals_11, buf0, buf1, buf2, buf3, buf4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.autograd import Variable
class LSTM(nn.Module):
def __init__(self, input_size, cell_size, hidden_size):
"""
cell_size is the size of cell_state.
        hidden_size is the size of hidden_state, i.e. the output state of each step.
"""
super(LSTM, self).__init__()
self.cell_size = cell_size
self.hidden_size = hidden_size
self.fl = nn.Linear(input_size + hidden_size, hidden_size)
self.il = nn.Linear(input_size + hidden_size, hidden_size)
self.ol = nn.Linear(input_size + hidden_size, hidden_size)
self.Cl = nn.Linear(input_size + hidden_size, hidden_size)
def forward(self, input, Hidden_State, Cell_State):
combined = torch.cat((input, Hidden_State), 1)
f = F.sigmoid(self.fl(combined))
i = F.sigmoid(self.il(combined))
o = F.sigmoid(self.ol(combined))
C = F.tanh(self.Cl(combined))
Cell_State = f * Cell_State + i * C
Hidden_State = o * F.tanh(Cell_State)
return Hidden_State, Cell_State
def loop(self, inputs):
batch_size = inputs.size(0)
time_step = inputs.size(1)
Hidden_State, Cell_State = self.initHidden(batch_size)
for i in range(time_step):
Hidden_State, Cell_State = self.forward(torch.squeeze(inputs[:,
i:i + 1, :]), Hidden_State, Cell_State)
return Hidden_State, Cell_State
def initHidden(self, batch_size):
use_gpu = torch.cuda.is_available()
if use_gpu:
            Hidden_State = Variable(torch.zeros(batch_size, self.hidden_size).cuda())
            Cell_State = Variable(torch.zeros(batch_size, self.hidden_size).cuda())
return Hidden_State, Cell_State
else:
Hidden_State = Variable(torch.zeros(batch_size, self.hidden_size))
Cell_State = Variable(torch.zeros(batch_size, self.hidden_size))
return Hidden_State, Cell_State
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'cell_size': 4, 'hidden_size': 4}]
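def _lstm_demo():
    # Editorial usage sketch (names and shapes chosen here, not from the repo):
    # loop() unrolls the recurrence over the time axis of a
    # (batch, time, features) tensor.
    lstm = LSTM(input_size=4, cell_size=4, hidden_size=4)
    inputs = torch.rand(4, 10, 4)
    if torch.cuda.is_available():  # initHidden allocates on the GPU if present
        lstm, inputs = lstm.cuda(), inputs.cuda()
    hidden, cell = lstm.loop(inputs)
    return hidden.shape, cell.shape  # both torch.Size([4, 4])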
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask)
tmp10 = tl.load(in_ptr4 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp5 = tl.sigmoid(tmp4)
tmp7 = libdevice.tanh(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tmp3 + tmp8
tmp11 = tl.sigmoid(tmp10)
tmp12 = libdevice.tanh(tmp9)
tmp13 = tmp11 * tmp12
tl.store(out_ptr0 + x0, tmp9, xmask)
tl.store(out_ptr1 + x0, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 8), (8, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 8), (8, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, buf0, reinterpret_tensor(primals_5,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, buf0, reinterpret_tensor(primals_7,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf3)
del primals_7
del primals_8
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_10, buf0, reinterpret_tensor(primals_9,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf4)
del primals_10
del primals_9
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_tanh_1[grid(16)](buf1, primals_11,
buf2, buf4, buf3, buf5, buf6, 16, XBLOCK=16, num_warps=1,
num_stages=1)
return buf6, buf5, primals_11, buf0, buf1, buf2, buf3, buf4, buf5
class LSTMNew(nn.Module):
def __init__(self, input_size, cell_size, hidden_size):
"""
cell_size is the size of cell_state.
        hidden_size is the size of hidden_state, i.e. the output state of each step.
"""
super(LSTMNew, self).__init__()
self.cell_size = cell_size
self.hidden_size = hidden_size
self.fl = nn.Linear(input_size + hidden_size, hidden_size)
self.il = nn.Linear(input_size + hidden_size, hidden_size)
self.ol = nn.Linear(input_size + hidden_size, hidden_size)
self.Cl = nn.Linear(input_size + hidden_size, hidden_size)
def loop(self, inputs):
batch_size = inputs.size(0)
time_step = inputs.size(1)
Hidden_State, Cell_State = self.initHidden(batch_size)
for i in range(time_step):
Hidden_State, Cell_State = self.forward(torch.squeeze(inputs[:,
i:i + 1, :]), Hidden_State, Cell_State)
return Hidden_State, Cell_State
def initHidden(self, batch_size):
use_gpu = torch.cuda.is_available()
if use_gpu:
            Hidden_State = Variable(torch.zeros(batch_size, self.hidden_size).cuda())
            Cell_State = Variable(torch.zeros(batch_size, self.hidden_size).cuda())
return Hidden_State, Cell_State
else:
Hidden_State = Variable(torch.zeros(batch_size, self.hidden_size))
Cell_State = Variable(torch.zeros(batch_size, self.hidden_size))
return Hidden_State, Cell_State
def forward(self, input_0, input_1, input_2):
primals_3 = self.fl.weight
primals_4 = self.fl.bias
primals_5 = self.il.weight
primals_6 = self.il.bias
primals_7 = self.ol.weight
primals_8 = self.ol.bias
primals_9 = self.Cl.weight
primals_10 = self.Cl.bias
primals_1 = input_0
primals_2 = input_1
primals_11 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1]
 | zhiyongc/Graph_Convolutional_LSTM | LSTM | false | 16,821 | ["MIT"] | 281 | a703b63e626b1e2563fe3f45d9714e468b1d4a0e | https://github.com/zhiyongc/Graph_Convolutional_LSTM/tree/a703b63e626b1e2563fe3f45d9714e468b1d4a0e |
BG_loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/jf/cjfautajfzvzjhx55l4lny5pwcqh7kuudth5rmntahgldhtt2e2b.py
# Topologically Sorted Source Nodes: [mul, mul_1, mul_2, mul_3, mul_4, mul_5, mul_6, mul_7, loss], Original ATen: [aten.mul, aten.sub, aten.abs, aten.mean]
# Source node to ATen node mapping:
# loss => abs_1, mean, sub
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# mul_6 => mul_6
# mul_7 => mul_7
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, %select_1), kwargs = {})
# %select_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%arg0_1, %mul, 0, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_4, %select_5), kwargs = {})
# %select_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%arg1_1, %mul_1, 0, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_8, %select_9), kwargs = {})
# %select_scatter_default_2 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default, %mul_2, 0, 1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_13, %select_14), kwargs = {})
# %select_scatter_default_3 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %mul_3, 0, 1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_18, %select_19), kwargs = {})
# %select_scatter_default_4 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_2, %mul_4, 0, 2), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_23, %select_24), kwargs = {})
# %select_scatter_default_5 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_3, %mul_5, 0, 2), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_28, %select_29), kwargs = {})
# %select_scatter_default_6 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_4, %mul_6, 0, 3), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_33, %select_34), kwargs = {})
# %select_scatter_default_7 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_5, %mul_7, 0, 3), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_scatter_default_6, %select_scatter_default_7), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {})
triton_per_fused_abs_mean_mul_sub_0 = async_compile.triton('triton_per_fused_abs_mean_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 14, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r1 = (rindex // 64)
r0 = rindex % 64
r2 = rindex
tmp3 = tl.load(in_ptr0 + (64 + r0), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (64 + r0), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (r0), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (r0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (r2), None)
tmp14 = tl.load(in_ptr2 + (64 + r0), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (r0), None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr2 + (r2), None)
tmp23 = tl.load(in_ptr0 + (192 + r0), None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr1 + (192 + r0), None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (128 + r0), None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (128 + r0), None, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr2 + (192 + r0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr2 + (128 + r0), None, eviction_policy='evict_last')
tmp0 = r1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tmp3 * tmp4
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = tmp0 == tmp6
tmp10 = tmp8 * tmp9
tmp12 = tl.where(tmp7, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp5, tmp12)
tmp15 = tmp3 * tmp14
tmp17 = tmp8 * tmp16
tmp19 = tl.where(tmp7, tmp17, tmp18)
tmp20 = tl.where(tmp2, tmp15, tmp19)
tmp21 = tl.full([1], 3, tl.int32)
tmp22 = tmp0 == tmp21
tmp25 = tmp23 * tmp24
tmp26 = tl.full([1], 2, tl.int32)
tmp27 = tmp0 == tmp26
tmp30 = tmp28 * tmp29
tmp31 = tl.where(tmp27, tmp30, tmp13)
tmp32 = tl.where(tmp22, tmp25, tmp31)
tmp34 = tmp23 * tmp33
tmp36 = tmp28 * tmp35
tmp37 = tl.where(tmp27, tmp36, tmp20)
tmp38 = tl.where(tmp22, tmp34, tmp37)
tmp39 = tmp32 - tmp38
tmp40 = tl_math.abs(tmp39)
tmp41 = tl.broadcast_to(tmp40, [RBLOCK])
tmp43 = triton_helpers.promote_to_tensor(tl.sum(tmp41, 0))
tmp44 = 256.0
tmp45 = tmp43 / tmp44
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp45, None)
''', device_str='cuda')
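# Editorial note (sketch): this persistent reduction evaluates
#     mean(|masks * real_imgs - masks * fake_imgs|)
# over all 256 elements in a single program, fusing the per-image masking,
# the subtraction, abs(), and the mean that nn.L1Loss would perform.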
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [mul, mul_1, mul_2, mul_3, mul_4, mul_5, mul_6, mul_7, loss], Original ATen: [aten.mul, aten.sub, aten.abs, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_abs_mean_mul_sub_0.run(buf3, arg2_1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data.distributed
class BG_loss(nn.Module):
def __init__(self):
super(BG_loss, self).__init__()
self.loss = nn.L1Loss()
def forward(self, real_imgs, fake_imgs, masks):
real_imgs_ = real_imgs.clone()
fake_imgs_ = fake_imgs.clone()
for index in range(len(real_imgs)):
real_imgs_[index] = masks[index] * real_imgs[index]
fake_imgs_[index] = masks[index] * fake_imgs[index]
loss = self.loss(real_imgs_, fake_imgs_)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
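def _bg_loss_vectorized(real_imgs, fake_imgs, masks):
    # Editorial sketch: the per-index loop in BG_loss.forward is equivalent
    # to one broadcasted multiply here, since all three tensors share the
    # same (N, C, H, W) shape, so the masked L1 loss can be written directly.
    return torch.nn.functional.l1_loss(masks * real_imgs, masks * fake_imgs)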
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex // 64
r0 = rindex % 64
r2 = rindex
tmp3 = tl.load(in_ptr0 + (64 + r0), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (64 + r0), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + r2, None)
tmp14 = tl.load(in_ptr2 + (64 + r0), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + r0, None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr2 + r2, None)
tmp23 = tl.load(in_ptr0 + (192 + r0), None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr1 + (192 + r0), None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (128 + r0), None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (128 + r0), None, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr2 + (192 + r0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr2 + (128 + r0), None, eviction_policy='evict_last')
tmp0 = r1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tmp3 * tmp4
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = tmp0 == tmp6
tmp10 = tmp8 * tmp9
tmp12 = tl.where(tmp7, tmp10, tmp11)
tmp13 = tl.where(tmp2, tmp5, tmp12)
tmp15 = tmp3 * tmp14
tmp17 = tmp8 * tmp16
tmp19 = tl.where(tmp7, tmp17, tmp18)
tmp20 = tl.where(tmp2, tmp15, tmp19)
tmp21 = tl.full([1], 3, tl.int32)
tmp22 = tmp0 == tmp21
tmp25 = tmp23 * tmp24
tmp26 = tl.full([1], 2, tl.int32)
tmp27 = tmp0 == tmp26
tmp30 = tmp28 * tmp29
tmp31 = tl.where(tmp27, tmp30, tmp13)
tmp32 = tl.where(tmp22, tmp25, tmp31)
tmp34 = tmp23 * tmp33
tmp36 = tmp28 * tmp35
tmp37 = tl.where(tmp27, tmp36, tmp20)
tmp38 = tl.where(tmp22, tmp34, tmp37)
tmp39 = tmp32 - tmp38
tmp40 = tl_math.abs(tmp39)
tmp41 = tl.broadcast_to(tmp40, [RBLOCK])
tmp43 = triton_helpers.promote_to_tensor(tl.sum(tmp41, 0))
tmp44 = 256.0
tmp45 = tmp43 / tmp44
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp45, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
get_raw_stream(0)
triton_per_fused_abs_mean_mul_sub_0[grid(1)](buf3, arg2_1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf3,
class BG_lossNew(nn.Module):
def __init__(self):
super(BG_lossNew, self).__init__()
self.loss = nn.L1Loss()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
 | ziqi-jin/OpenUnReID | BG_loss | false | 16,822 | ["Apache-2.0"] | 344 | 50eb516945c418398cac890029d1b366c27c0185 | https://github.com/ziqi-jin/OpenUnReID/tree/50eb516945c418398cac890029d1b366c27c0185 |
SmoothSoftmax | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/fj/cfjn6mfcgqjbhisnycp4nzhk5ssnvr3ltss62nbqujigbrfjpg7i.py
# Topologically Sorted Source Nodes: [logistic_value, sum_1, truediv], Original ATen: [aten.sigmoid, aten.sum, aten.div]
# Source node to ATen node mapping:
# logistic_value => sigmoid
# sum_1 => sum_1
# truediv => div
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sigmoid, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sigmoid, %sum_1), kwargs = {})
triton_poi_fused_div_sigmoid_sum_0 = async_compile.triton('triton_poi_fused_div_sigmoid_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sigmoid_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sigmoid_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp3 = tl.sigmoid(tmp2)
tmp5 = tl.sigmoid(tmp4)
tmp6 = tmp3 + tmp5
tmp8 = tl.sigmoid(tmp7)
tmp9 = tmp6 + tmp8
tmp11 = tl.sigmoid(tmp10)
tmp12 = tmp9 + tmp11
tmp13 = tmp1 / tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
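# Editorial note (sketch): for each row of 4 elements the kernel computes
#     sigmoid(x) / sum(sigmoid(x))
# loading the whole row once to build the denominator -- a fused form of the
# two-line reference module that follows.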
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logistic_value, sum_1, truediv], Original ATen: [aten.sigmoid, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_sigmoid_sum_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
from torch import nn
class SmoothSoftmax(nn.Module):
def forward(self, x: 'Tensor'):
logistic_value = torch.sigmoid(x)
return logistic_value / logistic_value.sum(dim=-1, keepdim=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
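def _smooth_softmax_demo():
    # Editorial check (illustrative only): unlike softmax, SmoothSoftmax
    # normalizes sigmoid activations, but each row still sums to 1.
    x = torch.rand(4, 4, 4, 4)
    y = SmoothSoftmax()(x)
    return torch.allclose(y.sum(dim=-1), torch.ones(4, 4, 4))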
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_sigmoid_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tl.sigmoid(tmp2)
tmp5 = tl.sigmoid(tmp4)
tmp6 = tmp3 + tmp5
tmp8 = tl.sigmoid(tmp7)
tmp9 = tmp6 + tmp8
tmp11 = tl.sigmoid(tmp10)
tmp12 = tmp9 + tmp11
tmp13 = tmp1 / tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_sigmoid_sum_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SmoothSoftmaxNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
 | zsl24/voice-activity-detection | SmoothSoftmax | false | 16,823 | ["MIT"] | 74 | a034be23c6283121c6b72e778c6ff6711045cbe3 | https://github.com/zsl24/voice-activity-detection/tree/a034be23c6283121c6b72e778c6ff6711045cbe3 |
Quaternion | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/6p/c6p43qqv7a36ths4tadzruramlotkrd3bqifom3aeud4dem5uvnd.py
# Topologically Sorted Source Nodes: [rvec], Original ATen: [aten.div]
# Source node to ATen node mapping:
# rvec => div
# Graph fragment:
# %div : [num_users=30] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %unsqueeze), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tmp14 = libdevice.sqrt(tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')
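# Explanatory note (added commentary; the helper below is an illustrative
# eager-mode sketch of this kernel, not Inductor output): the four loads at
# offsets 0/16/32/48 walk dim 1 of the contiguous (4, 4, 4, 4) input, so the
# kernel divides every element by sqrt(1e-05 + sum of squares over dim 1).
def _reference_div_0(x):
    # Assumed-equivalent computation for a contiguous input tensor.
    return x / torch.sqrt(1e-05 + (x * x).sum(dim=1, keepdim=True))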
# kernel path: runs/run_shard_0/inductor_cache/xs/cxs4nsdzr7rdm7fghcbfqs7dgquishluorqg442xilcmviudil3o.py
# Topologically Sorted Source Nodes: [pow_2, mul, sub, pow_3, mul_1, sub_1, mul_2, mul_3, sub_2, mul_4, mul_5, mul_6, add_1, mul_7, mul_8, mul_9, add_2, mul_10, pow_4, mul_11, sub_3, pow_5, mul_12, sub_4, mul_13, mul_14, sub_5, mul_15, mul_16, mul_17, sub_6, mul_18, mul_19, mul_20, add_3, mul_21, pow_6, mul_22, sub_7, pow_7, mul_23, sub_8], Original ATen: [aten.pow, aten.mul, aten.rsub, aten.sub, aten.add]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# mul => mul
# mul_1 => mul_1
# mul_10 => mul_10
# mul_11 => mul_11
# mul_12 => mul_12
# mul_13 => mul_13
# mul_14 => mul_14
# mul_15 => mul_15
# mul_16 => mul_16
# mul_17 => mul_17
# mul_18 => mul_18
# mul_19 => mul_19
# mul_2 => mul_2
# mul_20 => mul_20
# mul_21 => mul_21
# mul_22 => mul_22
# mul_23 => mul_23
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# mul_6 => mul_6
# mul_7 => mul_7
# mul_8 => mul_8
# mul_9 => mul_9
# pow_2 => pow_2
# pow_3 => pow_3
# pow_4 => pow_4
# pow_5 => pow_5
# pow_6 => pow_6
# pow_7 => pow_7
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# sub_3 => sub_3
# sub_4 => sub_4
# sub_5 => sub_5
# sub_6 => sub_6
# sub_7 => sub_7
# sub_8 => sub_8
# Graph fragment:
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, 2.0), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mul), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_1, 2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_3, 2.0), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, %select_3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_4, %select_5), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %mul_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, 2.0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_6, %select_7), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_8, %select_9), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %mul_6), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 2.0), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_10, %select_11), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_12, %select_13), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_8, %mul_9), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 2.0), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_14, 2), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_4, 2.0), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mul_11), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_15, 2), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_5, 2.0), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_3, %mul_12), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_16, %select_17), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_18, %select_19), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_13, %mul_14), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, 2.0), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_20, %select_21), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_22, %select_23), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_16, %mul_17), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, 2.0), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_24, %select_25), kwargs = {})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_26, %select_27), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_19, %mul_20), kwargs = {})
# %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, 2.0), kwargs = {})
# %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_28, 2), kwargs = {})
# %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_6, 2.0), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mul_22), kwargs = {})
# %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_29, 2), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_7, 2.0), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_7, %mul_23), kwargs = {})
triton_poi_fused_add_mul_pow_rsub_sub_1 = async_compile.triton('triton_poi_fused_add_mul_pow_rsub_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_rsub_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_rsub_sub_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp10 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp12 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp1 = tmp0 * tmp0
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp3
tmp7 = tmp6 * tmp6
tmp8 = tmp7 * tmp2
tmp9 = tmp5 - tmp8
tmp11 = tmp10 * tmp0
tmp13 = tmp6 * tmp12
tmp14 = tmp11 - tmp13
tmp15 = tmp14 * tmp2
tmp16 = tmp10 * tmp6
tmp17 = tmp0 * tmp12
tmp18 = tmp16 + tmp17
tmp19 = tmp18 * tmp2
tmp20 = tmp11 + tmp13
tmp21 = tmp20 * tmp2
tmp22 = tmp0 * tmp6
tmp23 = tmp10 * tmp12
tmp24 = tmp22 - tmp23
tmp25 = tmp24 * tmp2
tmp26 = tmp16 - tmp17
tmp27 = tmp26 * tmp2
tmp28 = tmp23 + tmp22
tmp29 = tmp28 * tmp2
tmp30 = tmp10 * tmp10
tmp31 = tmp30 * tmp2
tmp32 = tmp4 - tmp31
tmp33 = tmp32 - tmp8
tmp34 = tmp32 - tmp3
tl.store(out_ptr0 + (x0 + (144*x1)), tmp9, xmask)
tl.store(out_ptr1 + (x0 + (144*x1)), tmp15, xmask)
tl.store(out_ptr2 + (x0 + (144*x1)), tmp19, xmask)
tl.store(out_ptr3 + (x0 + (144*x1)), tmp21, xmask)
tl.store(out_ptr4 + (x0 + (144*x1)), tmp25, xmask)
tl.store(out_ptr5 + (x0 + (144*x1)), tmp27, xmask)
tl.store(out_ptr6 + (x0 + (144*x1)), tmp29, xmask)
tl.store(out_ptr7 + (x0 + (144*x1)), tmp33, xmask)
tl.store(out_ptr8 + (x0 + (144*x1)), tmp34, xmask)
''', device_str='cuda')
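# Note on the kernel above: tmp10, tmp0, tmp6 and tmp12 are the four
# quaternion components (offsets 0/16/32/48 along dim 1 of the normalized
# input), and the nine stores emit the nine entries of the corresponding
# 3x3 rotation matrix. Each entry lands at a distinct multiple-of-16 offset
# inside a 144-stride scratch buffer, which lets the caller expose the
# result as stacked 3x3 matrices without a separate concatenation kernel.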
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [rvec], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf10 = empty_strided_cuda((4, 36, 4), (144, 4, 1), torch.float32)
buf1 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 0) # alias
buf2 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 16) # alias
buf3 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 32) # alias
buf4 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 48) # alias
buf6 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 80) # alias
buf7 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 96) # alias
buf8 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 112) # alias
buf5 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 64) # alias
buf9 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 128) # alias
# Topologically Sorted Source Nodes: [pow_2, mul, sub, pow_3, mul_1, sub_1, mul_2, mul_3, sub_2, mul_4, mul_5, mul_6, add_1, mul_7, mul_8, mul_9, add_2, mul_10, pow_4, mul_11, sub_3, pow_5, mul_12, sub_4, mul_13, mul_14, sub_5, mul_15, mul_16, mul_17, sub_6, mul_18, mul_19, mul_20, add_3, mul_21, pow_6, mul_22, sub_7, pow_7, mul_23, sub_8], Original ATen: [aten.pow, aten.mul, aten.rsub, aten.sub, aten.add]
triton_poi_fused_add_mul_pow_rsub_sub_1.run(buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf5, buf9, 64, grid=grid(64), stream=stream0)
del buf0
return (reinterpret_tensor(buf10, (64, 3, 3), (9, 3, 1), 0), )
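# Layout note: buf1..buf9 are aliased 16-element slices of the single
# (4, 36, 4) scratch tensor buf10 (stride (144, 4, 1)); since 4 * 36 * 4 ==
# 64 * 3 * 3, the reinterpret_tensor call above can present the nine matrix
# entries as a contiguous (64, 3, 3) batch with no extra copy.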
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class Quaternion(nn.Module):
def __init__(self):
super(Quaternion, self).__init__()
def forward(self, rvec):
theta = torch.sqrt(1e-05 + torch.sum(rvec ** 2, dim=1))
rvec = rvec / theta[:, None]
        return torch.stack((
            1.0 - 2.0 * rvec[:, 1] ** 2 - 2.0 * rvec[:, 2] ** 2,
            2.0 * (rvec[:, 0] * rvec[:, 1] - rvec[:, 2] * rvec[:, 3]),
            2.0 * (rvec[:, 0] * rvec[:, 2] + rvec[:, 1] * rvec[:, 3]),
            2.0 * (rvec[:, 0] * rvec[:, 1] + rvec[:, 2] * rvec[:, 3]),
            1.0 - 2.0 * rvec[:, 0] ** 2 - 2.0 * rvec[:, 2] ** 2,
            2.0 * (rvec[:, 1] * rvec[:, 2] - rvec[:, 0] * rvec[:, 3]),
            2.0 * (rvec[:, 0] * rvec[:, 2] - rvec[:, 1] * rvec[:, 3]),
            2.0 * (rvec[:, 0] * rvec[:, 3] + rvec[:, 1] * rvec[:, 2]),
            1.0 - 2.0 * rvec[:, 0] ** 2 - 2.0 * rvec[:, 1] ** 2,
        ), dim=1).view(-1, 3, 3)
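# Minimal usage sketch (illustrative, not part of the original repo code):
#   q = Quaternion()
#   R = q(torch.rand(4, 4, 4, 4))   # R.shape == (64, 3, 3)
# Dim 1 holds the quaternion components (x, y, z, w) = rvec[:, 0..3]; after
# the normalization above, each 3x3 block is the rotation matrix of one
# (approximately unit) quaternion.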
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tmp14 = libdevice.sqrt(tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused_add_mul_pow_rsub_sub_1(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp1 = tmp0 * tmp0
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp3
tmp7 = tmp6 * tmp6
tmp8 = tmp7 * tmp2
tmp9 = tmp5 - tmp8
tmp11 = tmp10 * tmp0
tmp13 = tmp6 * tmp12
tmp14 = tmp11 - tmp13
tmp15 = tmp14 * tmp2
tmp16 = tmp10 * tmp6
tmp17 = tmp0 * tmp12
tmp18 = tmp16 + tmp17
tmp19 = tmp18 * tmp2
tmp20 = tmp11 + tmp13
tmp21 = tmp20 * tmp2
tmp22 = tmp0 * tmp6
tmp23 = tmp10 * tmp12
tmp24 = tmp22 - tmp23
tmp25 = tmp24 * tmp2
tmp26 = tmp16 - tmp17
tmp27 = tmp26 * tmp2
tmp28 = tmp23 + tmp22
tmp29 = tmp28 * tmp2
tmp30 = tmp10 * tmp10
tmp31 = tmp30 * tmp2
tmp32 = tmp4 - tmp31
tmp33 = tmp32 - tmp8
tmp34 = tmp32 - tmp3
tl.store(out_ptr0 + (x0 + 144 * x1), tmp9, xmask)
tl.store(out_ptr1 + (x0 + 144 * x1), tmp15, xmask)
tl.store(out_ptr2 + (x0 + 144 * x1), tmp19, xmask)
tl.store(out_ptr3 + (x0 + 144 * x1), tmp21, xmask)
tl.store(out_ptr4 + (x0 + 144 * x1), tmp25, xmask)
tl.store(out_ptr5 + (x0 + 144 * x1), tmp27, xmask)
tl.store(out_ptr6 + (x0 + 144 * x1), tmp29, xmask)
tl.store(out_ptr7 + (x0 + 144 * x1), tmp33, xmask)
tl.store(out_ptr8 + (x0 + 144 * x1), tmp34, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf10 = empty_strided_cuda((4, 36, 4), (144, 4, 1), torch.float32)
buf1 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 0)
buf2 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 16)
buf3 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 32)
buf4 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 48)
buf6 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 80)
buf7 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 96)
buf8 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 112)
buf5 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 64)
buf9 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 128)
triton_poi_fused_add_mul_pow_rsub_sub_1[grid(64)](buf0, buf1, buf2,
buf3, buf4, buf6, buf7, buf8, buf5, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf0
return reinterpret_tensor(buf10, (64, 3, 3), (9, 3, 1), 0),
class QuaternionNew(nn.Module):
def __init__(self):
super(QuaternionNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| zhuhao-nju/mofanerf | Quaternion | false | 16,824 | ["MIT"] | 55 | 0206526e25aab3dd8f0cc789f290c7559642676b | https://github.com/zhuhao-nju/mofanerf/tree/0206526e25aab3dd8f0cc789f290c7559642676b |
Rodrigues | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/6p/c6p43qqv7a36ths4tadzruramlotkrd3bqifom3aeud4dem5uvnd.py
# Topologically Sorted Source Nodes: [rvec], Original ATen: [aten.div]
# Source node to ATen node mapping:
# rvec => div
# Graph fragment:
# %div : [num_users=24] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %unsqueeze), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tmp14 = libdevice.sqrt(tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/x2/cx2yb6qpgynif644namqcrcm2rvyiwz3bejropjwd3ujhy4gpzke.py
# Topologically Sorted Source Nodes: [pow_1, sum_1, add, theta, pow_2, pow_3, sub, costh, mul, add_1, mul_1, sub_1, mul_2, sinth, mul_3, sub_2, mul_4, sub_3, mul_5, mul_6, add_2, mul_7, sub_4, mul_8, mul_9, add_3, pow_4, pow_5, sub_5, mul_10, add_4, mul_11, sub_6, mul_12, mul_13, sub_7, mul_14, sub_8, mul_15, mul_16, sub_9, mul_17, sub_10, mul_18, mul_19, add_5, pow_6, pow_7, sub_11, mul_20, add_6], Original ATen: [aten.pow, aten.sum, aten.add, aten.sqrt, aten.rsub, aten.cos, aten.mul, aten.sin, aten.sub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# add_4 => add_4
# add_5 => add_5
# add_6 => add_6
# costh => cos
# mul => mul
# mul_1 => mul_1
# mul_10 => mul_10
# mul_11 => mul_11
# mul_12 => mul_12
# mul_13 => mul_13
# mul_14 => mul_14
# mul_15 => mul_15
# mul_16 => mul_16
# mul_17 => mul_17
# mul_18 => mul_18
# mul_19 => mul_19
# mul_2 => mul_2
# mul_20 => mul_20
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# mul_6 => mul_6
# mul_7 => mul_7
# mul_8 => mul_8
# mul_9 => mul_9
# pow_1 => pow_1
# pow_2 => pow_2
# pow_3 => pow_3
# pow_4 => pow_4
# pow_5 => pow_5
# pow_6 => pow_6
# pow_7 => pow_7
# sinth => sin
# sub => sub
# sub_1 => sub_1
# sub_10 => sub_10
# sub_11 => sub_11
# sub_2 => sub_2
# sub_3 => sub_3
# sub_4 => sub_4
# sub_5 => sub_5
# sub_6 => sub_6
# sub_7 => sub_7
# sub_8 => sub_8
# sub_9 => sub_9
# sum_1 => sum_1
# theta => sqrt
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-05), kwargs = {})
# %sqrt : [num_users=3] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select, 2), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_1, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %pow_3), kwargs = {})
# %cos : [num_users=9] = call_function[target=torch.ops.aten.cos.default](args = (%sqrt,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %cos), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, %mul), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, %select_3), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %cos), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %sub_1), kwargs = {})
# %sin : [num_users=6] = call_function[target=torch.ops.aten.sin.default](args = (%sqrt,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_4, %sin), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %mul_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_5, %select_6), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %cos), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %sub_3), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_7, %sin), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %mul_6), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_8, %select_9), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %cos), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_7, %sub_4), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_10, %sin), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_8, %mul_9), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_11, 2), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_12, 2), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %pow_5), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %cos), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_4, %mul_10), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_13, %select_14), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %cos), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_11, %sub_6), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_15, %sin), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_12, %mul_13), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_16, %select_17), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %cos), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_14, %sub_8), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_18, %sin), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_15, %mul_16), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_19, %select_20), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %cos), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_17, %sub_10), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_21, %sin), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_18, %mul_19), kwargs = {})
# %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_22, 2), kwargs = {})
# %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_23, 2), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %pow_7), kwargs = {})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %cos), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_6, %mul_20), kwargs = {})
triton_poi_fused_add_cos_mul_pow_rsub_sin_sqrt_sub_sum_1 = async_compile.triton('triton_poi_fused_add_cos_mul_pow_rsub_sin_sqrt_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cos_mul_pow_rsub_sin_sqrt_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_cos_mul_pow_rsub_sin_sqrt_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp4 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask)
tmp6 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask)
tmp9 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask)
tmp12 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask)
tmp21 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp25 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp1 = tmp0 * tmp0
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp5 = tmp4 * tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp11 + tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tl_math.cos(tmp17)
tmp19 = tmp3 * tmp18
tmp20 = tmp1 + tmp19
tmp22 = tmp0 * tmp21
tmp23 = tmp2 - tmp18
tmp24 = tmp22 * tmp23
tmp26 = tl_math.sin(tmp17)
tmp27 = tmp25 * tmp26
tmp28 = tmp24 - tmp27
tmp29 = tmp0 * tmp25
tmp30 = tmp29 * tmp23
tmp31 = tmp21 * tmp26
tmp32 = tmp30 + tmp31
tmp33 = tmp24 + tmp27
tmp34 = tmp21 * tmp25
tmp35 = tmp34 * tmp23
tmp36 = tmp0 * tmp26
tmp37 = tmp35 - tmp36
tmp38 = tmp30 - tmp31
tmp39 = tmp35 + tmp36
tmp40 = tmp21 * tmp21
tmp41 = tmp2 - tmp40
tmp42 = tmp41 * tmp18
tmp43 = tmp40 + tmp42
tmp44 = tmp25 * tmp25
tmp45 = tmp2 - tmp44
tmp46 = tmp45 * tmp18
tmp47 = tmp44 + tmp46
tl.store(out_ptr0 + (x0 + (144*x1)), tmp20, xmask)
tl.store(out_ptr1 + (x0 + (144*x1)), tmp28, xmask)
tl.store(out_ptr2 + (x0 + (144*x1)), tmp32, xmask)
tl.store(out_ptr3 + (x0 + (144*x1)), tmp33, xmask)
tl.store(out_ptr4 + (x0 + (144*x1)), tmp37, xmask)
tl.store(out_ptr5 + (x0 + (144*x1)), tmp38, xmask)
tl.store(out_ptr6 + (x0 + (144*x1)), tmp39, xmask)
tl.store(out_ptr7 + (x0 + (144*x1)), tmp43, xmask)
tl.store(out_ptr8 + (x0 + (144*x1)), tmp47, xmask)
''', device_str='cuda')
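# Note on the kernel above: it fuses the whole Rodrigues pipeline for one
# output position -- recompute theta = sqrt(1e-05 + sum(rvec ** 2, dim=1))
# from the raw input (in_ptr1), take cos/sin once, and emit all nine entries
# of the rotation matrix from the normalized axis (in_ptr0). As in the
# Quaternion kernel, the nine stores target 16-element slices of a
# 144-stride scratch buffer.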
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [rvec], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
buf10 = empty_strided_cuda((4, 36, 4), (144, 4, 1), torch.float32)
buf1 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 0) # alias
buf2 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 16) # alias
buf3 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 32) # alias
buf4 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 48) # alias
buf6 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 80) # alias
buf7 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 96) # alias
buf8 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 112) # alias
buf5 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 64) # alias
buf9 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 128) # alias
# Topologically Sorted Source Nodes: [pow_1, sum_1, add, theta, pow_2, pow_3, sub, costh, mul, add_1, mul_1, sub_1, mul_2, sinth, mul_3, sub_2, mul_4, sub_3, mul_5, mul_6, add_2, mul_7, sub_4, mul_8, mul_9, add_3, pow_4, pow_5, sub_5, mul_10, add_4, mul_11, sub_6, mul_12, mul_13, sub_7, mul_14, sub_8, mul_15, mul_16, sub_9, mul_17, sub_10, mul_18, mul_19, add_5, pow_6, pow_7, sub_11, mul_20, add_6], Original ATen: [aten.pow, aten.sum, aten.add, aten.sqrt, aten.rsub, aten.cos, aten.mul, aten.sin, aten.sub]
triton_poi_fused_add_cos_mul_pow_rsub_sin_sqrt_sub_sum_1.run(buf0, arg0_1, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf5, buf9, 64, grid=grid(64), stream=stream0)
del arg0_1
del buf0
return (reinterpret_tensor(buf10, (64, 3, 3), (9, 3, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class Rodrigues(nn.Module):
def __init__(self):
super(Rodrigues, self).__init__()
def forward(self, rvec):
theta = torch.sqrt(1e-05 + torch.sum(rvec ** 2, dim=1))
rvec = rvec / theta[:, None]
costh = torch.cos(theta)
sinth = torch.sin(theta)
        return torch.stack((
            rvec[:, 0] ** 2 + (1.0 - rvec[:, 0] ** 2) * costh,
            rvec[:, 0] * rvec[:, 1] * (1.0 - costh) - rvec[:, 2] * sinth,
            rvec[:, 0] * rvec[:, 2] * (1.0 - costh) + rvec[:, 1] * sinth,
            rvec[:, 0] * rvec[:, 1] * (1.0 - costh) + rvec[:, 2] * sinth,
            rvec[:, 1] ** 2 + (1.0 - rvec[:, 1] ** 2) * costh,
            rvec[:, 1] * rvec[:, 2] * (1.0 - costh) - rvec[:, 0] * sinth,
            rvec[:, 0] * rvec[:, 2] * (1.0 - costh) - rvec[:, 1] * sinth,
            rvec[:, 1] * rvec[:, 2] * (1.0 - costh) + rvec[:, 0] * sinth,
            rvec[:, 2] ** 2 + (1.0 - rvec[:, 2] ** 2) * costh,
        ), dim=1).view(-1, 3, 3)
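# The stacked expression above is the entrywise expansion of the Rodrigues
# formula R = cos(theta) * I + (1 - cos(theta)) * r r^T + sin(theta) * [r]_x,
# where r is the normalized axis and [r]_x its skew-symmetric cross-product
# matrix. Usage sketch (illustrative): Rodrigues()(torch.rand(4, 4, 4, 4))
# returns a (64, 3, 3) stack of rotation matrices.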
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tmp14 = libdevice.sqrt(tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused_add_cos_mul_pow_rsub_sin_sqrt_sub_sum_1(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5,
out_ptr6, out_ptr7, out_ptr8, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp6 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp21 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp25 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp1 = tmp0 * tmp0
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp5 = tmp4 * tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp11 + tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tl_math.cos(tmp17)
tmp19 = tmp3 * tmp18
tmp20 = tmp1 + tmp19
tmp22 = tmp0 * tmp21
tmp23 = tmp2 - tmp18
tmp24 = tmp22 * tmp23
tmp26 = tl_math.sin(tmp17)
tmp27 = tmp25 * tmp26
tmp28 = tmp24 - tmp27
tmp29 = tmp0 * tmp25
tmp30 = tmp29 * tmp23
tmp31 = tmp21 * tmp26
tmp32 = tmp30 + tmp31
tmp33 = tmp24 + tmp27
tmp34 = tmp21 * tmp25
tmp35 = tmp34 * tmp23
tmp36 = tmp0 * tmp26
tmp37 = tmp35 - tmp36
tmp38 = tmp30 - tmp31
tmp39 = tmp35 + tmp36
tmp40 = tmp21 * tmp21
tmp41 = tmp2 - tmp40
tmp42 = tmp41 * tmp18
tmp43 = tmp40 + tmp42
tmp44 = tmp25 * tmp25
tmp45 = tmp2 - tmp44
tmp46 = tmp45 * tmp18
tmp47 = tmp44 + tmp46
tl.store(out_ptr0 + (x0 + 144 * x1), tmp20, xmask)
tl.store(out_ptr1 + (x0 + 144 * x1), tmp28, xmask)
tl.store(out_ptr2 + (x0 + 144 * x1), tmp32, xmask)
tl.store(out_ptr3 + (x0 + 144 * x1), tmp33, xmask)
tl.store(out_ptr4 + (x0 + 144 * x1), tmp37, xmask)
tl.store(out_ptr5 + (x0 + 144 * x1), tmp38, xmask)
tl.store(out_ptr6 + (x0 + 144 * x1), tmp39, xmask)
tl.store(out_ptr7 + (x0 + 144 * x1), tmp43, xmask)
tl.store(out_ptr8 + (x0 + 144 * x1), tmp47, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf10 = empty_strided_cuda((4, 36, 4), (144, 4, 1), torch.float32)
buf1 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 0)
buf2 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 16)
buf3 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 32)
buf4 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 48)
buf6 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 80)
buf7 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 96)
buf8 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 112)
buf5 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 64)
buf9 = reinterpret_tensor(buf10, (4, 4, 4), (144, 4, 1), 128)
    triton_poi_fused_add_cos_mul_pow_rsub_sin_sqrt_sub_sum_1[grid(64)](
        buf0, arg0_1, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf5,
        buf9, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del buf0
return reinterpret_tensor(buf10, (64, 3, 3), (9, 3, 1), 0),
class RodriguesNew(nn.Module):
def __init__(self):
super(RodriguesNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| zhuhao-nju/mofanerf | Rodrigues | false | 16,825 | ["MIT"] | 55 | 0206526e25aab3dd8f0cc789f290c7559642676b | https://github.com/zhuhao-nju/mofanerf/tree/0206526e25aab3dd8f0cc789f290c7559642676b |
ChebConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ja/cjavyu5r4xu5nlitoyabvy7ofhz4ogm5mu3dk4l67xn2v3fgexma.py
# Topologically Sorted Source Nodes: [D], Original ATen: [aten.diag_embed]
# Source node to ATen node mapping:
# D => eq, full_default, iota, where
# Graph fragment:
# %iota : [num_users=3] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota, %unsqueeze_1), kwargs = {})
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %permute, %full_default), kwargs = {})
triton_poi_fused_diag_embed_0 = async_compile.triton('triton_poi_fused_diag_embed_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_diag_embed_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_diag_embed_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp3 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp0 = x0
tmp1 = x1
tmp2 = tmp0 == tmp1
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = -0.5
tmp11 = libdevice.pow(tmp9, tmp10)
tmp12 = 0.0
tmp13 = tl.where(tmp2, tmp11, tmp12)
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
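# Note on the kernel above: it fuses the degree computation with diag_embed.
# For a 4x4 adjacency it sums each row (the four loads at 4*x0 + 0..3),
# raises the sum to the power -0.5, and writes the result on the diagonal of
# the output -- i.e. it materializes D^(-1/2) directly, with zeros elsewhere.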
# kernel path: runs/run_shard_0/inductor_cache/7m/c7melmuezdwy5c6u3qg5b2dvtctgso6etynkxd74lrziaclsxuvt.py
# Topologically Sorted Source Nodes: [D, eye, L], Original ATen: [aten.diag_embed, aten.eye, aten.sub]
# Source node to ATen node mapping:
# D => full_default, iota
# L => sub
# eye => eq_1, full_default_1, where_1
# Graph fragment:
# %iota : [num_users=3] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_1, %iota), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_1, %full_default), kwargs = {})
# %sub : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mm_1), kwargs = {})
triton_poi_fused_diag_embed_eye_sub_1 = async_compile.triton('triton_poi_fused_diag_embed_eye_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_diag_embed_eye_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_diag_embed_eye_sub_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp6 = tl.load(in_out_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp7 = tmp5 - tmp6
tl.store(in_out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
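# Note on the kernel above: in_out_ptr0 arrives holding the matrix product
# D^(-1/2) @ A @ D^(-1/2) (computed by two mm calls) and leaves holding the
# normalized graph Laplacian L = I - D^(-1/2) A D^(-1/2); the identity is
# synthesized inline from the x1 == x0 comparison rather than loaded.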
# kernel path: runs/run_shard_0/inductor_cache/nt/cnt63h74o735agddjfb5clildoy6gfngo35dt46zyissubqboxxq.py
# Topologically Sorted Source Nodes: [D, eye, multi_order_laplacian], Original ATen: [aten.diag_embed, aten.eye, aten.zeros]
# Source node to ATen node mapping:
# D => full_default, iota
# eye => eq_1, full_default_1, where_1
# multi_order_laplacian => full_default_3
# Graph fragment:
# %iota : [num_users=3] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_1, %iota), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_1, %full_default), kwargs = {})
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([5, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %select_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%full_default_3, %where_1, 0, 0), kwargs = {})
# %select_scatter_default_1 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default, %sub, 0, 1), kwargs = {})
triton_poi_fused_diag_embed_eye_zeros_2 = async_compile.triton('triton_poi_fused_diag_embed_eye_zeros_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_diag_embed_eye_zeros_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_diag_embed_eye_zeros_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x3 = xindex % 16
x1 = (xindex // 4) % 4
x0 = xindex % 4
x4 = xindex
tmp3 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp0 = x2
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = tmp0 == tmp4
tmp6 = x1
tmp7 = x0
tmp8 = tmp6 == tmp7
tmp9 = 1.0
tmp10 = 0.0
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = tl.where(tmp5, tmp11, tmp10)
tmp13 = tl.where(tmp2, tmp3, tmp12)
tl.store(out_ptr0 + (x4), tmp13, xmask)
''', device_str='cuda')
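# Note on the kernel above: it zero-fills the (5, 4, 4) multi-order
# Laplacian buffer and scatters the first two Chebyshev terms in one pass --
# slice 0 receives the identity (T_0) and slice 1 receives L (T_1), matching
# the two select_scatter ops in the graph fragment.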
# kernel path: runs/run_shard_0/inductor_cache/rs/crsoboivz65dylv2rwpdelqiarxf2u4i3uaaebwqwvnjhxkjbjjw.py
# Topologically Sorted Source Nodes: [mul, sub_1], Original ATen: [aten.mul, aten.sub]
# Source node to ATen node mapping:
# mul => mul
# sub_1 => sub_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_2, 2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %select_8), kwargs = {})
# %select_scatter_default_2 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %sub_1, 0, 2), kwargs = {})
triton_poi_fused_mul_sub_3 = async_compile.triton('triton_poi_fused_mul_sub_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sub_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 - tmp6
tmp9 = tl.where(tmp2, tmp7, tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
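# The kernel above (and the two near-identical ones that follow for slices 3
# and 4) implements one step of the Chebyshev recurrence used by ChebConv:
# T_k(L) = 2 * L @ T_(k-1)(L) - T_(k-2)(L), with the mm done by an extern
# kernel and this kernel handling the scale-and-subtract plus the scatter
# into slice k. A minimal eager-mode sketch of the whole stack (illustrative
# assumption, not generated code):
def _reference_cheb_stack(L, K=5):
    # Build [T_0, ..., T_(K-1)] of the Laplacian via the recurrence.
    T = [torch.eye(L.shape[0], dtype=L.dtype, device=L.device), L]
    for _ in range(2, K):
        T.append(2 * L @ T[-1] - T[-2])
    return torch.stack(T)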
# kernel path: runs/run_shard_0/inductor_cache/7w/c7wq3e2ygdgfl4kidxckp4el2aafp77m6tdwbcqmea2hupgvkoek.py
# Topologically Sorted Source Nodes: [mul_1, sub_2], Original ATen: [aten.mul, aten.sub]
# Source node to ATen node mapping:
# mul_1 => mul_1
# sub_2 => sub_2
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_3, 2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %select_15), kwargs = {})
# %select_scatter_default_3 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_2, %sub_2, 0, 3), kwargs = {})
triton_poi_fused_mul_sub_4 = async_compile.triton('triton_poi_fused_mul_sub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sub_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 - tmp6
tmp9 = tl.where(tmp2, tmp7, tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vx/cvxptdlc4xjc2zntndyxuyooh2cl4jzpbya2v26rjm7vmkjhjszi.py
# Topologically Sorted Source Nodes: [mul_2, sub_3], Original ATen: [aten.mul, aten.sub]
# Source node to ATen node mapping:
# mul_2 => mul_2
# sub_3 => sub_3
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_4, 2), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %select_22), kwargs = {})
# %select_scatter_default_4 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_3, %sub_3, 0, 4), kwargs = {})
triton_poi_fused_mul_sub_5 = async_compile.triton('triton_poi_fused_mul_sub_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sub_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (32 + x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 - tmp6
tmp9 = tl.where(tmp2, tmp7, tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wi/cwija4xxrmds4pzlo5szwl2su3gon4ktmt34eluogiaiavrknbqr.py
# Topologically Sorted Source Nodes: [sum_2, result_2], Original ATen: [aten.sum, aten.add]
# Source node to ATen node mapping:
# result_2 => add
# sum_2 => sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_6, [0]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %primals_4), kwargs = {})
triton_poi_fused_add_sum_6 = async_compile.triton('triton_poi_fused_add_sum_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sum_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_sum_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp7 = tl.load(in_ptr0 + (64 + x2), xmask)
tmp9 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (5, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_4, (1, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [D], Original ATen: [aten.diag_embed]
stream0 = get_raw_stream(0)
triton_poi_fused_diag_embed_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm]
extern_kernels.mm(buf0, primals_1, out=buf1)
del primals_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm_1], Original ATen: [aten.mm]
extern_kernels.mm(buf1, buf0, out=buf2)
del buf0
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [D, eye, L], Original ATen: [aten.diag_embed, aten.eye, aten.sub]
triton_poi_fused_diag_embed_eye_sub_1.run(buf3, 16, grid=grid(16), stream=stream0)
buf4 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [D, eye, multi_order_laplacian], Original ATen: [aten.diag_embed, aten.eye, aten.zeros]
triton_poi_fused_diag_embed_eye_zeros_2.run(buf3, buf4, 80, grid=grid(80), stream=stream0)
buf5 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [mm_2], Original ATen: [aten.mm]
extern_kernels.mm(buf3, reinterpret_tensor(buf4, (4, 4), (4, 1), 16), out=buf5)
buf6 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sub_1], Original ATen: [aten.mul, aten.sub]
triton_poi_fused_mul_sub_3.run(buf5, buf4, buf6, 80, grid=grid(80), stream=stream0)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [mm_3], Original ATen: [aten.mm]
extern_kernels.mm(buf3, reinterpret_tensor(buf6, (4, 4), (4, 1), 32), out=buf7)
buf8 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [mul_1, sub_2], Original ATen: [aten.mul, aten.sub]
triton_poi_fused_mul_sub_4.run(buf7, buf6, buf8, 80, grid=grid(80), stream=stream0)
buf9 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [mm_4], Original ATen: [aten.mm]
extern_kernels.mm(buf3, reinterpret_tensor(buf8, (4, 4), (4, 1), 48), out=buf9)
del buf3
buf10 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [mul_2, sub_3], Original ATen: [aten.mul, aten.sub]
triton_poi_fused_mul_sub_5.run(buf9, buf8, buf10, 80, grid=grid(80), stream=stream0)
buf11 = reinterpret_tensor(buf8, (20, 4), (4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [result], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf10, (20, 4), (4, 1), 0), primals_2, out=buf11)
del primals_2
buf12 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [result_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf11, (5, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (5, 4, 4), (16, 4, 1), 0), out=buf12)
del primals_3
buf13 = reinterpret_tensor(buf9, (1, 4, 4), (16, 4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [sum_2, result_2], Original ATen: [aten.sum, aten.add]
triton_poi_fused_add_sum_6.run(buf12, primals_4, buf13, 16, grid=grid(16), stream=stream0)
del buf12
del primals_4
return (buf13, reinterpret_tensor(buf11, (5, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((5, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn import init
class ChebConv(nn.Module):
"""
The ChebNet convolution operation.
:param in_c: int, number of input channels.
:param out_c: int, number of output channels.
    :param K: int, the order of the Chebyshev polynomial.
"""
def __init__(self, in_c, out_c, K, bias=True, normalize=True):
super(ChebConv, self).__init__()
self.normalize = normalize
self.weight = nn.Parameter(torch.Tensor(K + 1, 1, in_c, out_c))
init.xavier_normal_(self.weight)
if bias:
self.bias = nn.Parameter(torch.Tensor(1, 1, out_c))
init.zeros_(self.bias)
else:
self.register_parameter('bias', None)
self.K = K + 1
def forward(self, inputs, graph):
"""
:param inputs: the input data, [B, N, C]
:param graph: the graph structure, [N, N]
:return: convolution result, [B, N, D]
"""
L = ChebConv.get_laplacian(graph, self.normalize)
mul_L = self.cheb_polynomial(L).unsqueeze(1)
result = torch.matmul(mul_L, inputs)
result = torch.matmul(result, self.weight)
result = torch.sum(result, dim=0) + self.bias
return result
def cheb_polynomial(self, laplacian):
"""
        Compute the Chebyshev polynomials according to the graph Laplacian.
:param laplacian: the graph laplacian, [N, N].
:return: the multi order Chebyshev laplacian, [K, N, N].
"""
N = laplacian.size(0)
multi_order_laplacian = torch.zeros([self.K, N, N], device=
laplacian.device, dtype=torch.float)
multi_order_laplacian[0] = torch.eye(N, device=laplacian.device,
dtype=torch.float)
if self.K == 1:
return multi_order_laplacian
else:
multi_order_laplacian[1] = laplacian
if self.K == 2:
return multi_order_laplacian
else:
for k in range(2, self.K):
multi_order_laplacian[k] = 2 * torch.mm(laplacian,
multi_order_laplacian[k - 1]) - multi_order_laplacian[
k - 2]
return multi_order_laplacian
@staticmethod
def get_laplacian(graph, normalize):
"""
        Return the Laplacian of the graph.
        :param graph: the graph structure without self-loops, [N, N].
        :param normalize: whether to use the normalized Laplacian.
:return: graph laplacian.
"""
if normalize:
D = torch.diag(torch.sum(graph, dim=-1) ** (-1 / 2))
L = torch.eye(graph.size(0), device=graph.device, dtype=graph.dtype
) - torch.mm(torch.mm(D, graph), D)
else:
D = torch.diag(torch.sum(graph, dim=-1))
L = D - graph
return L
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_c': 4, 'out_c': 4, 'K': 4}]
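# Hypothetical usage sketch (illustration only, mirroring get_inputs()/get_init_inputs() above):
#   conv = ChebConv(in_c=4, out_c=4, K=4)
#   out = conv(torch.rand(4, 4), torch.rand(4, 4))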
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_diag_embed_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
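    # Builds D^{-1/2} as a dense [4, 4] matrix: each row sum of the graph raised to -0.5 on the diagonal, zeros elsewhere.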
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp0 = x0
tmp1 = x1
tmp2 = tmp0 == tmp1
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = -0.5
tmp11 = libdevice.pow(tmp9, tmp10)
tmp12 = 0.0
tmp13 = tl.where(tmp2, tmp11, tmp12)
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_diag_embed_eye_sub_1(in_out_ptr0, xnumel, XBLOCK: tl.
constexpr):
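    # In-place update: L = I - (D^{-1/2} A D^{-1/2}); the buffer already holds the normalized adjacency.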
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp6 = tl.load(in_out_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp7 = tmp5 - tmp6
tl.store(in_out_ptr0 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_diag_embed_eye_zeros_2(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
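    # Initializes the [K, N, N] stack of Chebyshev terms: slice 0 = identity, slice 1 = L, remaining slices = 0.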
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp0 = x2
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = tmp0 == tmp4
tmp6 = x1
tmp7 = x0
tmp8 = tmp6 == tmp7
tmp9 = 1.0
tmp10 = 0.0
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = tl.where(tmp5, tmp11, tmp10)
tmp13 = tl.where(tmp2, tmp3, tmp12)
tl.store(out_ptr0 + x4, tmp13, xmask)
@triton.jit
def triton_poi_fused_mul_sub_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
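    # Writes T_2 = 2 * (L @ T_1) - T_0 into slice 2 of the stack; all other slices are copied through unchanged.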
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 - tmp6
tmp9 = tl.where(tmp2, tmp7, tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_mul_sub_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
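    # Writes T_3 = 2 * (L @ T_2) - T_1 into slice 3 (offset 16 reads T_1, slice 1 of the stack).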
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 - tmp6
tmp9 = tl.where(tmp2, tmp7, tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_mul_sub_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
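    # Writes T_4 = 2 * (L @ T_3) - T_2 into slice 4 (offset 32 reads T_2, slice 2 of the stack).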
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (32 + x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 - tmp6
tmp9 = tl.where(tmp2, tmp7, tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_sum_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
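    # Sums the five [4, 4] result slices over the polynomial-order dimension and adds the broadcast bias.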
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp7 = tl.load(in_ptr0 + (64 + x2), xmask)
tmp9 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (5, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_4, (1, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_diag_embed_0[grid(16)](primals_1, buf0, 16, XBLOCK
=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, primals_1, out=buf1)
del primals_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, buf0, out=buf2)
del buf0
buf3 = buf2
del buf2
triton_poi_fused_diag_embed_eye_sub_1[grid(16)](buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_diag_embed_eye_zeros_2[grid(80)](buf3, buf4, 80,
XBLOCK=128, num_warps=4, num_stages=1)
buf5 = buf1
del buf1
extern_kernels.mm(buf3, reinterpret_tensor(buf4, (4, 4), (4, 1), 16
), out=buf5)
buf6 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_sub_3[grid(80)](buf5, buf4, buf6, 80, XBLOCK=
128, num_warps=4, num_stages=1)
buf7 = buf5
del buf5
extern_kernels.mm(buf3, reinterpret_tensor(buf6, (4, 4), (4, 1), 32
), out=buf7)
buf8 = buf4
del buf4
triton_poi_fused_mul_sub_4[grid(80)](buf7, buf6, buf8, 80, XBLOCK=
128, num_warps=4, num_stages=1)
buf9 = buf7
del buf7
extern_kernels.mm(buf3, reinterpret_tensor(buf8, (4, 4), (4, 1), 48
), out=buf9)
del buf3
buf10 = buf6
del buf6
triton_poi_fused_mul_sub_5[grid(80)](buf9, buf8, buf10, 80, XBLOCK=
128, num_warps=4, num_stages=1)
buf11 = reinterpret_tensor(buf8, (20, 4), (4, 1), 0)
del buf8
extern_kernels.mm(reinterpret_tensor(buf10, (20, 4), (4, 1), 0),
primals_2, out=buf11)
del primals_2
buf12 = buf10
del buf10
extern_kernels.bmm(reinterpret_tensor(buf11, (5, 4, 4), (16, 4, 1),
0), reinterpret_tensor(primals_3, (5, 4, 4), (16, 4, 1), 0),
out=buf12)
del primals_3
buf13 = reinterpret_tensor(buf9, (1, 4, 4), (16, 4, 1), 0)
del buf9
triton_poi_fused_add_sum_6[grid(16)](buf12, primals_4, buf13, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del buf12
del primals_4
return buf13, reinterpret_tensor(buf11, (5, 4, 4), (16, 1, 4), 0)
class ChebConvNew(nn.Module):
"""
The ChebNet convolution operation.
:param in_c: int, number of input channels.
:param out_c: int, number of output channels.
    :param K: int, the order of the Chebyshev polynomial.
"""
def __init__(self, in_c, out_c, K, bias=True, normalize=True):
super(ChebConvNew, self).__init__()
self.normalize = normalize
self.weight = nn.Parameter(torch.Tensor(K + 1, 1, in_c, out_c))
init.xavier_normal_(self.weight)
if bias:
self.bias = nn.Parameter(torch.Tensor(1, 1, out_c))
init.zeros_(self.bias)
else:
self.register_parameter('bias', None)
self.K = K + 1
def cheb_polynomial(self, laplacian):
"""
        Compute the Chebyshev polynomials according to the graph Laplacian.
:param laplacian: the graph laplacian, [N, N].
:return: the multi order Chebyshev laplacian, [K, N, N].
"""
N = laplacian.size(0)
multi_order_laplacian = torch.zeros([self.K, N, N], device=
laplacian.device, dtype=torch.float)
multi_order_laplacian[0] = torch.eye(N, device=laplacian.device,
dtype=torch.float)
if self.K == 1:
return multi_order_laplacian
else:
multi_order_laplacian[1] = laplacian
if self.K == 2:
return multi_order_laplacian
else:
for k in range(2, self.K):
multi_order_laplacian[k] = 2 * torch.mm(laplacian,
multi_order_laplacian[k - 1]) - multi_order_laplacian[
k - 2]
return multi_order_laplacian
@staticmethod
def get_laplacian(graph, normalize):
"""
        Return the Laplacian of the graph.
        :param graph: the graph structure without self-loops, [N, N].
        :param normalize: whether to use the normalized Laplacian.
:return: graph laplacian.
"""
if normalize:
D = torch.diag(torch.sum(graph, dim=-1) ** (-1 / 2))
L = torch.eye(graph.size(0), device=graph.device, dtype=graph.dtype
) - torch.mm(torch.mm(D, graph), D)
else:
D = torch.diag(torch.sum(graph, dim=-1))
L = D - graph
return L
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_4 = self.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| zhaoweixi/GraFormer | ChebConv | false | 16,826 | [
"BSD-2-Clause"
] | 384 | 0a0a04014cdf157c11ab8e952862efa27c6a1980 | https://github.com/zhaoweixi/GraFormer/tree/0a0a04014cdf157c11ab8e952862efa27c6a1980 |
Attention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/gz/cgzs24mbxphohavcbjewgvglsrez2nf5347ftei7jzqkmjnvqt3s.py
# Topologically Sorted Source Nodes: [attended_input], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# attended_input => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %unsqueeze), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 256
x4 = (xindex // 4)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x5), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attended_input], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(arg1_1, arg0_1, buf0, 1024, grid=grid(1024), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
from torch import nn
class Attention(nn.Module):
def forward(self, selected_input: 'Tensor', attention: 'Tensor'):
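        # Expand the attention weights along a trailing axis so they broadcast over selected_input.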
attended_input = selected_input * attention.unsqueeze(-1)
return attended_input
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
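    # Broadcasted elementwise multiply: in_ptr0 repeats along the leading axis (x % 256), in_ptr1 along the trailing axis (x // 4).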
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 256
x4 = xindex // 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x5, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(1024)](arg1_1, arg0_1, buf0, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class AttentionNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| zsl24/voice-activity-detection | Attention | false | 16,827 | [
"MIT"
] | 74 | a034be23c6283121c6b72e778c6ff6711045cbe3 | https://github.com/zsl24/voice-activity-detection/tree/a034be23c6283121c6b72e778c6ff6711045cbe3 |
RerangeLayer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/mj/cmjldl2wbtdm4vb67syz22aplf5t4rmful3wvwclylaownjaq2hv.py
# Topologically Sorted Source Nodes: [add, truediv], Original ATen: [aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# truediv => div
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 2.0), kwargs = {})
triton_poi_fused_add_div_0 = async_compile.triton('triton_poi_fused_add_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, truediv], Original ATen: [aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn as nn
class RerangeLayer(nn.Module):
def __init__(self):
super(RerangeLayer, self).__init__()
def forward(self, inp):
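        # Rescale values from [-1, 1] to [0, 1].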
return (inp + 1.0) / 2.0
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
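    # Computes (x + 1) / 2, with the division rewritten as a multiply by 0.5.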
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class RerangeLayerNew(nn.Module):
def __init__(self):
super(RerangeLayerNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| zvict/HyperRIM | RerangeLayer | false | 16,828 | [
"Apache-2.0"
] | 92 | f3800196b59ea0f94561efa88ec2e6675e4c8b00 | https://github.com/zvict/HyperRIM/tree/f3800196b59ea0f94561efa88ec2e6675e4c8b00 |
FocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py
# Topologically Sorted Source Nodes: [cross_entropy_1], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# cross_entropy_1 => amax_1, sub_2
# Graph fragment:
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {})
# %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_1), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/kh/ckh5gimqiqdkf7nauozqrdjc2zt322h57wtvsnk75mdfe6t6pqvl.py
# Topologically Sorted Source Nodes: [cross_entropy_1, logpt, pt, sub, pow_1, neg_1, focal_loss, balanced_focal_loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div, aten.exp, aten.rsub, aten.pow]
# Source node to ATen node mapping:
# balanced_focal_loss => mul_3
# cross_entropy_1 => div_1, exp_1, log_2, mul_1, neg_1, sub_3, sum_3, sum_4
# focal_loss => mul_2
# logpt => neg_2
# neg_1 => neg_3
# pow_1 => pow_1
# pt => exp_2
# sub => sub_4
# Graph fragment:
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_3,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %log_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %arg0_1), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_4,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg_1, 64), kwargs = {})
# %neg_2 : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%div_1,), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_2,), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %exp_2), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_4, 2), kwargs = {})
# %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%pow_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_3, %neg_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, 0.25), kwargs = {})
triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r3), None)
tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (r3), None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tmp22 = -tmp21
tmp23 = tl_math.exp(tmp22)
tmp24 = 1.0
tmp25 = tmp24 - tmp23
tmp26 = tmp25 * tmp25
tmp27 = -tmp26
tmp28 = tmp27 * tmp22
tmp29 = 0.25
tmp30 = tmp28 * tmp29
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp30, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cross_entropy_1], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [cross_entropy_1, logpt, pt, sub, pow_1, neg_1, focal_loss, balanced_focal_loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div, aten.exp, aten.rsub, aten.pow]
triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1.run(buf2, buf0, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class FocalLoss(nn.Module):
def __init__(self, focusing_param=2, balance_param=0.25):
super(FocalLoss, self).__init__()
self.focusing_param = focusing_param
self.balance_param = balance_param
def forward(self, output, target):
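        # focal_loss = -(1 - p_t)**focusing_param * log(p_t), scaled by balance_param; log(p_t) = -cross_entropy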
cross_entropy = F.cross_entropy(output, target)
        torch.log(cross_entropy)  # note: this value is never used
logpt = -F.cross_entropy(output, target)
pt = torch.exp(logpt)
focal_loss = -(1 - pt) ** self.focusing_param * logpt
balanced_focal_loss = self.balance_param * focal_loss
return balanced_focal_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
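    # Log-softmax step 1: subtract the per-position maximum over the four slices along dim 1 for numerical stability.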
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
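    # Fused tail: finishes log-softmax, reduces the soft-target cross-entropy to its mean (factor 0.015625 = 1/64),
    # then applies the focal term -(1 - p_t)**2 * log(p_t) and the 0.25 balance factor.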
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + r3, None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tmp22 = -tmp21
tmp23 = tl_math.exp(tmp22)
tmp24 = 1.0
tmp25 = tmp24 - tmp23
tmp26 = tmp25 * tmp25
tmp27 = -tmp26
tmp28 = tmp27 * tmp22
tmp29 = 0.25
tmp30 = tmp28 * tmp29
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp30, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1[grid(1)](
buf2, buf0, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf2,
class FocalLossNew(nn.Module):
def __init__(self, focusing_param=2, balance_param=0.25):
super(FocalLossNew, self).__init__()
self.focusing_param = focusing_param
self.balance_param = balance_param
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| zwx8981/DBCNN-Pytorch | FocalLoss | false | 16,829 | [
"MIT"
] | 150 | 16c3156054a30a3eabb45dffcf538f42452a14f3 | https://github.com/zwx8981/DBCNN-Pytorch/tree/16c3156054a30a3eabb45dffcf538f42452a14f3 |
cross_entropy_prob | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py
# Topologically Sorted Source Nodes: [pred], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# pred => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/f5/cf5kacqts2qtnf6lllagpt43xgndw7reh7jwbc6oav6eubehrpoe.py
# Topologically Sorted Source Nodes: [neg, pred, mul, sum_1, loss], Original ATen: [aten.neg, aten._log_softmax, aten.mul, aten.sum, aten.mean]
# Source node to ATen node mapping:
# loss => mean
# mul => mul
# neg => neg
# pred => exp, log, sub_1, sum_1
# sum_1 => sum_2
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %sub_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_2,), kwargs = {})
triton_per_fused__log_softmax_mean_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_mean_mul_neg_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_mean_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp2 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp4 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp7 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp10 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp16 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp21 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp26 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp1 = -tmp0
tmp3 = tl_math.exp(tmp2)
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp3 + tmp5
tmp8 = tl_math.exp(tmp7)
tmp9 = tmp6 + tmp8
tmp11 = tl_math.exp(tmp10)
tmp12 = tmp9 + tmp11
tmp13 = tl_math.log(tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp1 * tmp14
tmp17 = -tmp16
tmp18 = tmp4 - tmp13
tmp19 = tmp17 * tmp18
tmp20 = tmp15 + tmp19
tmp22 = -tmp21
tmp23 = tmp7 - tmp13
tmp24 = tmp22 * tmp23
tmp25 = tmp20 + tmp24
tmp27 = -tmp26
tmp28 = tmp10 - tmp13
tmp29 = tmp27 * tmp28
tmp30 = tmp25 + tmp29
tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
tmp33 = tl.sum(tmp31, 1)[:, None]
tmp34 = 64.0
tmp35 = tmp33 / tmp34
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp35, None)
''', device_str='cuda')
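# Reference sketch (illustrative only) for the fused reduction above: it
# completes log_softmax (exp, sum over dim=1, log), multiplies by the negated
# soft targets, sums over dim=1, and averages the remaining 4*4*4 = 64
# elements -- which is where the hard-coded divisor 64.0 comes from.
def _soft_target_loss_reference(pred_shifted, soft_targets):
    log_prob = pred_shifted - pred_shifted.exp().sum(dim=1, keepdim=True).log()
    return (-soft_targets * log_prob).sum(dim=1).mean()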
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pred], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [neg, pred, mul, sum_1, loss], Original ATen: [aten.neg, aten._log_softmax, aten.mul, aten.sum, aten.mean]
triton_per_fused__log_softmax_mean_mul_neg_sum_1.run(buf3, arg1_1, buf0, 1, 64, grid=grid(1), stream=stream0)
del arg1_1
del buf0
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class cross_entropy_prob(nn.Module):
def __init__(self):
super(cross_entropy_prob, self).__init__()
def forward(self, pred, soft_targets):
        pred = F.log_softmax(pred, dim=1)
loss = torch.mean(torch.sum(-soft_targets * pred, 1))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
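# Usage sketch (illustrative): loss = cross_entropy_prob()(*get_inputs())
# returns a scalar tensor; log_softmax is taken over dim=1, which is the same
# dimension the compiled kernels above reduce over.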
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
    # xnumel == 1, so the x offset and both masks reduce to constants; only
    # the reduction dimension is live in this kernel.
    rindex = tl.arange(0, RBLOCK)[None, :]
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp7 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp10 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp16 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp21 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp26 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp1 = -tmp0
tmp3 = tl_math.exp(tmp2)
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp3 + tmp5
tmp8 = tl_math.exp(tmp7)
tmp9 = tmp6 + tmp8
tmp11 = tl_math.exp(tmp10)
tmp12 = tmp9 + tmp11
tmp13 = tl_math.log(tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp1 * tmp14
tmp17 = -tmp16
tmp18 = tmp4 - tmp13
tmp19 = tmp17 * tmp18
tmp20 = tmp15 + tmp19
tmp22 = -tmp21
tmp23 = tmp7 - tmp13
tmp24 = tmp22 * tmp23
tmp25 = tmp20 + tmp24
tmp27 = -tmp26
tmp28 = tmp10 - tmp13
tmp29 = tmp27 * tmp28
tmp30 = tmp25 + tmp29
tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
tmp33 = tl.sum(tmp31, 1)[:, None]
tmp34 = 64.0
tmp35 = tmp33 / tmp34
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp35, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf3,
arg1_1, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf3,
class cross_entropy_probNew(nn.Module):
def __init__(self):
super(cross_entropy_probNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
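# cross_entropy_probNew is the compiled drop-in for the eager module above;
# the guards in call() require contiguous (4, 4, 4, 4) float32 CUDA inputs.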
| zwx8981/DBCNN-Pytorch | cross_entropy_prob | false | 16,830 | ["MIT"] | 150 | 16c3156054a30a3eabb45dffcf538f42452a14f3 | https://github.com/zwx8981/DBCNN-Pytorch/tree/16c3156054a30a3eabb45dffcf538f42452a14f3 |
SelfAttentionBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
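# The kernel above precomputes per-row LayerNorm statistics for the (4, 4, 4)
# input, normalizing over the last dim: out_ptr0 holds mean(x), out_ptr1 holds
# rsqrt(var(x) + 1e-5). Equivalent eager sketch (illustrative, not part of the
# generated module):
def _layer_norm_stats_reference(x, eps=1e-05):
    mean = x.mean(dim=-1, keepdim=True)
    var = x.var(dim=-1, unbiased=False, keepdim=True)
    return mean, (var + eps).rsqrt()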
# kernel path: runs/run_shard_0/inductor_cache/au/cauoqf2i5yygrdgqp2sxq5bcgn62ctjuoag627tkbl7ouvm72z7k.py
# Topologically Sorted Source Nodes: [layer_norm, q, k], Original ATen: [aten.native_layer_norm, aten.add]
# Source node to ATen node mapping:
# k => add_3
# layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# q => add_2
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
# %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_4), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_6), kwargs = {})
triton_poi_fused_add_native_layer_norm_1 = async_compile.triton('triton_poi_fused_add_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + (x2), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr6 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp12 = tmp8 + tmp11
tl.store(out_ptr0 + (x4), tmp8, xmask)
tl.store(out_ptr1 + (x4), tmp10, xmask)
tl.store(out_ptr2 + (x4), tmp12, xmask)
''', device_str='cuda')
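# The kernel above finishes LayerNorm (scale by weight, shift by bias) and, in
# the same pass, forms the attention inputs q_in = norm1(x) + q_ape
# (primals_4) and k_in = norm1(x) + k_ape (primals_6), writing all three
# tensors so the q/k/v projections in call() can consume them directly.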
# kernel path: runs/run_shard_0/inductor_cache/xp/cxp3ouwpdhdlmipppq44wjaey2obmthzec7uqoddmpoigfmupxdx.py
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
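# The clone kernel above is a small transpose-copy: it materializes the
# (B, L, num_heads, head_dim) -> (B, num_heads, L, head_dim) permutation as a
# contiguous buffer so the batched matmuls below can run on plain strides.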
# kernel path: runs/run_shard_0/inductor_cache/ja/cjaxbyotypv7l2y3vuo6nsl6372ye7azhfecsr7tomt77udx365i.py
# Topologically Sorted Source Nodes: [attn_1, attn_2, attn_3], Original ATen: [aten.mul, aten.add, aten._softmax]
# Source node to ATen node mapping:
# attn_1 => mul_2
# attn_2 => add_4
# attn_3 => amax, exp, sub_1, sum_1
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 1.0), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_9), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_4, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_add_mul_3 = async_compile.triton('triton_poi_fused__softmax_add_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_mul_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp5 * tmp1
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp11 = tmp10 * tmp1
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 * tmp1
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp8 - tmp19
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp19
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp18 - tmp19
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tl.store(out_ptr0 + (x2), tmp19, xmask)
tl.store(out_ptr1 + (x2), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/64/c645eg6xdz4xivalrd2teinpkuqttgdn2vuvugzel3dbct6vtohz.py
# Topologically Sorted Source Nodes: [attn_1, attn_2, attn_3], Original ATen: [aten.mul, aten.add, aten._softmax]
# Source node to ATen node mapping:
# attn_1 => mul_2
# attn_2 => add_4
# attn_3 => amax, div, exp, sub_1
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 1.0), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_9), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_4, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_add_mul_4 = async_compile.triton('triton_poi_fused__softmax_add_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_mul_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_mul_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 16
x5 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
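# Together, triton_poi_fused__softmax_add_mul_3 and _4 implement a numerically
# stable softmax of attn * scale + attn_pos over the last dim: the first emits
# the per-row max and exp-sum, the second normalizes every element in place.
# Eager sketch (illustrative, not part of the generated module):
def _stable_softmax_reference(scores):
    m = scores.amax(dim=-1, keepdim=True)
    e = (scores - m).exp()
    return e / e.sum(dim=-1, keepdim=True)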
# kernel path: runs/run_shard_0/inductor_cache/zq/czqeiybdb6mlnwo4hmrayt3c44g7hbps2ftgdd7x2mv3sr2mwjbn.py
# Topologically Sorted Source Nodes: [x_2, x_4, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_1 => var_mean_1
# x_2 => add_5
# x_4 => add_6
# Graph fragment:
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_11), kwargs = {})
# %add_6 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_5), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_6, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (1))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (2))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (3))
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
tl.store(out_ptr1 + (x0), tmp40, xmask)
''', device_str='cuda')
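# triton_poi_fused_add_native_layer_norm_5 fuses the attention residual
# (x + proj_out + proj_bias) with the second LayerNorm's mean/variance
# computation; triton_poi_fused_add_native_layer_norm_6 below recomputes the
# same residual sum and applies the normalization with weight and bias, so the
# intermediate sum is never written out to memory.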
# kernel path: runs/run_shard_0/inductor_cache/6l/c6l5kzogqt6qgowb3zqvwwwqezmjn5mmwq5w672exeszre3xha3f.py
# Topologically Sorted Source Nodes: [x_2, x_4, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_1 => add_7, add_8, mul_3, mul_4, rsqrt_1, sub_2
# x_2 => add_5
# x_4 => add_6
# Graph fragment:
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_11), kwargs = {})
# %add_6 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_5), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_7,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_6, %getitem_3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_12), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_13), kwargs = {})
triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ue/cue4pkpg3sz23hebnb3hyeupyssfhd2qlqy7a2dqcsdzfd7kphqh.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# x_6 => add_9, erf, mul_5, mul_6, mul_7
# Graph fragment:
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_19, 0.5), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_19, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_6,), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %add_9), kwargs = {})
triton_poi_fused_gelu_7 = async_compile.triton('triton_poi_fused_gelu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
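# The kernel above is the exact (erf-based) GELU,
# 0.5 * x * (1 + erf(x / sqrt(2))), with 0.7071067811865476 = 1/sqrt(2).
# Eager sketch (illustrative, not part of the generated module):
def _gelu_reference(x):
    return 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))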
# kernel path: runs/run_shard_0/inductor_cache/fe/cfekjdsthatxjbbhgpigh2n2waatgzwuthjkuqadgzag4jvzvepw.py
# Topologically Sorted Source Nodes: [x_2, x_4, x_10], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_10 => add_10
# x_2 => add_5
# x_4 => add_6
# Graph fragment:
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_11), kwargs = {})
# %add_6 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_5), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %view_21), kwargs = {})
triton_poi_fused_add_8 = async_compile.triton('triton_poi_fused_add_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + (x2), xmask)
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
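# triton_poi_fused_add_8 recomputes the first residual sum
# (x + proj_out + proj_bias) instead of re-reading a saved tensor, then adds
# the MLP output plus its fc2 bias in place, producing the block output in a
# single pass.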
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (16, 4), (4, 1))
assert_size_stride(primals_15, (16, ), (1, ))
assert_size_stride(primals_16, (4, 16), (16, 1))
assert_size_stride(primals_17, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm, q, k], Original ATen: [aten.native_layer_norm, aten.add]
triton_poi_fused_add_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, primals_4, primals_6, buf2, buf3, buf5, 64, grid=grid(64), stream=stream0)
del primals_1
del primals_2
del primals_4
del primals_6
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf4)
buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf5, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf6)
buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf4, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
buf9 = reinterpret_tensor(buf4, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf6, buf9, 16, 4, grid=grid(16, 4), stream=stream0)
buf10 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf9, (16, 1, 4), (4, 0, 1), 0), out=buf10)
buf11 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf6 # reuse
buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [attn_1, attn_2, attn_3], Original ATen: [aten.mul, aten.add, aten._softmax]
triton_poi_fused__softmax_add_mul_3.run(buf10, primals_9, buf11, buf12, 64, grid=grid(64), stream=stream0)
buf13 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [attn_1, attn_2, attn_3], Original ATen: [aten.mul, aten.add, aten._softmax]
triton_poi_fused__softmax_add_mul_4.run(buf13, primals_9, buf11, buf12, 256, grid=grid(256), stream=stream0)
del primals_9
buf14 = reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf7, buf14, 16, 4, grid=grid(16, 4), stream=stream0)
buf15 = reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf14, (16, 4, 1), (4, 1, 0), 0), out=buf15)
buf16 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf15, buf16, 16, 4, grid=grid(16, 4), stream=stream0)
buf17 = reinterpret_tensor(buf15, (16, 4), (4, 1), 0); del buf15 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf17)
buf18 = buf1; del buf1 # reuse
buf19 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_2, x_4, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(primals_3, buf17, primals_11, buf18, buf19, 16, grid=grid(16), stream=stream0)
buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2, x_4, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_6.run(primals_3, buf17, primals_11, buf18, buf19, primals_12, primals_13, buf20, 64, grid=grid(64), stream=stream0)
del buf18
del buf19
del primals_13
buf21 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_15, reinterpret_tensor(buf20, (16, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf21)
del primals_15
buf22 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.gelu]
triton_poi_fused_gelu_7.run(buf21, buf22, 256, grid=grid(256), stream=stream0)
buf23 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf22, (16, 16), (16, 1), 0), reinterpret_tensor(primals_16, (16, 4), (1, 16), 0), out=buf23)
buf24 = reinterpret_tensor(buf23, (4, 4, 4), (16, 4, 1), 0); del buf23 # reuse
# Topologically Sorted Source Nodes: [x_2, x_4, x_10], Original ATen: [aten.add]
triton_poi_fused_add_8.run(buf24, primals_3, buf17, primals_11, primals_17, 64, grid=grid(64), stream=stream0)
del primals_17
return (buf24, primals_3, primals_11, primals_12, reinterpret_tensor(buf3, (16, 4), (4, 1), 0), reinterpret_tensor(buf5, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf13, reinterpret_tensor(buf16, (16, 4), (4, 1), 0), buf17, reinterpret_tensor(buf20, (16, 4), (4, 1), 0), buf21, reinterpret_tensor(buf22, (16, 16), (16, 1), 0), primals_16, primals_14, primals_10, reinterpret_tensor(buf14, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 4), 0), primals_8, primals_7, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
class Mlp(nn.Module):
""" Multilayer perceptron."""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
"""
Args:
x (torch.Tensor): (B, L, C), input tensor
Returns:
torch.Tensor: (B, L, C), output tensor
"""
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
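# Note (illustrative): with dim=4 and mlp_ratio=4.0 as in get_init_inputs(),
# Mlp(in_features=4, hidden_features=16) yields exactly the (16, 4) fc1 and
# (4, 16) fc2 weights asserted for primals_14 and primals_16 in call() above.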
class SelfAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
super(SelfAttention, self).__init__()
        assert dim % num_heads == 0, f'dim {dim} should be divisible by num_heads {num_heads}.'
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
if attn_pos_encoding_only:
self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias)
else:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_pos_encoding_only = attn_pos_encoding_only
def forward(self, x, q_ape, k_ape, attn_pos):
"""
Args:
x (torch.Tensor): (B, L, C)
q_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for q
k_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for k
attn_pos (torch.Tensor | None): (1 or B, num_heads, L, L), untied positional encoding
Returns:
torch.Tensor: (B, L, C)
"""
B, N, C = x.shape
if self.attn_pos_encoding_only:
assert q_ape is None and k_ape is None
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.
num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
else:
q = x + q_ape if q_ape is not None else x
q = self.q(q).reshape(B, N, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
k = x + k_ape if k_ape is not None else x
k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
v = self.v(x).reshape(B, -1, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
attn = q @ k.transpose(-2, -1)
attn = attn * self.scale
if attn_pos is not None:
attn = attn + attn_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class SelfAttentionBlock(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=nn.Identity(),
act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_pos_encoding_only=
False):
super(SelfAttentionBlock, self).__init__()
self.norm1 = norm_layer(dim)
self.attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale,
attn_drop, drop, attn_pos_encoding_only)
self.drop_path = drop_path
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
def forward(self, x, q_ape, k_ape, attn_pos):
"""
Args:
x (torch.Tensor): (B, L, C)
q_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for q
k_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for k
attn_pos (torch.Tensor | None): (1 or B, num_heads, L, L), untied positional encoding
Returns:
torch.Tensor: (B, L, C)
"""
x = x + self.drop_path(self.attn(self.norm1(x), q_ape, k_ape, attn_pos)
)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4]),
torch.rand([4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'num_heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex % 16
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr6 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp12 = tmp8 + tmp11
tl.store(out_ptr0 + x4, tmp8, xmask)
tl.store(out_ptr1 + x4, tmp10, xmask)
tl.store(out_ptr2 + x4, tmp12, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
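# Kernels 3 and 4 implement the attention softmax in two passes: kernel 3 scans
# each length-4 row of (attn * scale + attn_pos) and records the row maximum and
# the sum of exp(value - max); kernel 4 then normalizes every element in place
# with those statistics. Subtracting the row max before exponentiating keeps
# tl_math.exp from overflowing -- the standard numerically stable softmax.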
@triton.jit
def triton_poi_fused__softmax_add_mul_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp5 * tmp1
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp11 = tmp10 * tmp1
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 * tmp1
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp8 - tmp19
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp19
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp18 - tmp19
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tl.store(out_ptr0 + x2, tmp19, xmask)
tl.store(out_ptr1 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused__softmax_add_mul_4(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 16
x5 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr2 + 2)
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + 3)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_gelu_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
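# The kernel above evaluates the exact erf-based GELU,
#     gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))),
# with 1/sqrt(2) folded in as the constant 0.7071067811865476. A minimal sanity
# check (an illustrative sketch, not part of the generated module):
#     x = torch.randn(256, device='cuda')
#     ref = 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))
#     torch.testing.assert_close(torch.nn.functional.gelu(x), ref)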
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (16, 4), (4, 1))
assert_size_stride(primals_15, (16,), (1,))
assert_size_stride(primals_16, (4, 16), (16, 1))
assert_size_stride(primals_17, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_1[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, primals_4, primals_6, buf2, buf3,
buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
del primals_2
del primals_4
del primals_6
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf4)
buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf6)
buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf4, buf8, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf4, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf4
triton_poi_fused_clone_2[grid(16, 4)](buf6, buf9, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf9, (16, 1, 4), (4, 0, 1), 0), out=buf10)
buf11 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf6
buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_mul_3[grid(64)](buf10, primals_9,
buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf10
triton_poi_fused__softmax_add_mul_4[grid(256)](buf13, primals_9,
buf11, buf12, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf14 = reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf12
triton_poi_fused_clone_2[grid(16, 4)](buf7, buf14, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf15 = reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 1), 0)
del buf7
extern_kernels.bmm(reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf14, (16, 4, 1), (4, 1, 0), 0), out=buf15)
buf16 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0)
del buf11
triton_poi_fused_clone_2[grid(16, 4)](buf15, buf16, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf17 = reinterpret_tensor(buf15, (16, 4), (4, 1), 0)
del buf15
extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf17)
buf18 = buf1
del buf1
buf19 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_3, buf17,
primals_11, buf18, buf19, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_3, buf17,
primals_11, buf18, buf19, primals_12, primals_13, buf20, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf18
del buf19
del primals_13
buf21 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_15, reinterpret_tensor(buf20, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_14, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf21)
del primals_15
buf22 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_gelu_7[grid(256)](buf21, buf22, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf23 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf22, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_16, (16, 4), (1, 16), 0), out=buf23)
buf24 = reinterpret_tensor(buf23, (4, 4, 4), (16, 4, 1), 0)
del buf23
triton_poi_fused_add_8[grid(64)](buf24, primals_3, buf17,
primals_11, primals_17, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_17
return buf24, primals_3, primals_11, primals_12, reinterpret_tensor(buf3,
(16, 4), (4, 1), 0), reinterpret_tensor(buf5, (16, 4), (4, 1), 0
), reinterpret_tensor(buf2, (16, 4), (4, 1), 0
), buf13, reinterpret_tensor(buf16, (16, 4), (4, 1), 0
), buf17, reinterpret_tensor(buf20, (16, 4), (4, 1), 0
), buf21, reinterpret_tensor(buf22, (16, 16), (16, 1), 0
), primals_16, primals_14, primals_10, reinterpret_tensor(buf14, (
16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf8, (16, 1, 4), (4,
1, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 4), 0
), primals_8, primals_7, primals_5
class Mlp(nn.Module):
""" Multilayer perceptron."""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
"""
Args:
x (torch.Tensor): (B, L, C), input tensor
Returns:
torch.Tensor: (B, L, C), output tensor
"""
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class SelfAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
super(SelfAttention, self).__init__()
        assert dim % num_heads == 0, f'dim {dim} should be divisible by num_heads {num_heads}.'
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
if attn_pos_encoding_only:
self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias)
else:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_pos_encoding_only = attn_pos_encoding_only
def forward(self, x, q_ape, k_ape, attn_pos):
"""
Args:
x (torch.Tensor): (B, L, C)
q_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for q
k_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for k
attn_pos (torch.Tensor | None): (1 or B, num_heads, L, L), untied positional encoding
Returns:
torch.Tensor: (B, L, C)
"""
B, N, C = x.shape
if self.attn_pos_encoding_only:
assert q_ape is None and k_ape is None
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.
num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
else:
q = x + q_ape if q_ape is not None else x
q = self.q(q).reshape(B, N, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
k = x + k_ape if k_ape is not None else x
k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
v = self.v(x).reshape(B, -1, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
attn = q @ k.transpose(-2, -1)
attn = attn * self.scale
if attn_pos is not None:
attn = attn + attn_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class SelfAttentionBlockNew(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=nn.Identity(),
act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_pos_encoding_only=
False):
super(SelfAttentionBlockNew, self).__init__()
self.norm1 = norm_layer(dim)
self.attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale,
attn_drop, drop, attn_pos_encoding_only)
self.drop_path = drop_path
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
def forward(self, input_0, input_1, input_2, input_3):
primals_1 = self.norm1.weight
primals_2 = self.norm1.bias
primals_4 = self.attn.q.weight
primals_5 = self.attn.k.weight
primals_6 = self.attn.v.weight
primals_7 = self.attn.proj.weight
primals_11 = self.attn.proj.bias
primals_12 = self.norm2.weight
primals_13 = self.norm2.bias
primals_14 = self.mlp.fc1.weight
primals_15 = self.mlp.fc1.bias
primals_16 = self.mlp.fc2.weight
primals_17 = self.mlp.fc2.bias
primals_3 = input_0
primals_8 = input_1
primals_9 = input_2
primals_10 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0]
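# Usage sketch (illustrative; CUDA device and shapes assumed from get_inputs
# above -- this snippet is not part of the original repo):
#     blk = SelfAttentionBlockNew(dim=4, num_heads=4).cuda()
#     x, q_ape, k_ape, attn_pos = (t.cuda() for t in get_inputs())
#     out = blk(x, q_ape, k_ape, attn_pos)   # out.shape == (4, 4, 4)
# The compiled forward accepts the same four tensors as SelfAttentionBlock but
# routes them through call() instead of eager attention.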
| zhangzhengde0225/SwinTrack | SelfAttentionBlock | false | 16,831 | ["MIT"] | 143 | 526be17f8ef266cb924c6939bd8dda23e9b73249 | https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249 |
A2Block | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py
# Topologically Sorted Source Nodes: [A], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# A => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/op/copfv42gjlkhupvvole5flboufj2gput3d4cd6pwsekqnmv77cys.py
# Topologically Sorted Source Nodes: [B_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# B_2 => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float("-inf"))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp13 = tmp8 / tmp12
tl.store(out_ptr2 + (r2 + (16*x3)), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/uv/cuvvmsa7ltsxauddskilot47lh5bqu5ea2sefipbnns63kijehhg.py
# Topologically Sorted Source Nodes: [atten_2, out], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# atten_2 => convolution_3
# out => add
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_3, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %convolution_3), kwargs = {})
triton_poi_fused_add_convolution_2 = async_compile.triton('triton_poi_fused_add_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp2 = tl.load(in_ptr2 + (y0), ymask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(out_ptr0 + (x2 + (16*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [A], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [B], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [A], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
buf5 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [B_2], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf1, primals_5, buf5, 16, 16, grid=grid(16), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [G], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf5, (4, 16, 4), (64, 1, 16), 0), out=buf6)
# Topologically Sorted Source Nodes: [C], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf10 = reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [C_2], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf7, primals_7, buf10, 16, 16, grid=grid(16), stream=stream0)
del primals_7
buf11 = reinterpret_tensor(buf7, (4, 16, 4), (64, 4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [atten], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf10, (4, 16, 4), (64, 1, 16), 0), buf6, out=buf11)
# Topologically Sorted Source Nodes: [atten_2], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 1, 16, 4), 0), primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 4, 4), (64, 1, 16, 4))
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [atten_2, out], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_2.run(primals_1, buf12, primals_9, buf13, 16, 16, grid=grid(16, 16), stream=stream0)
del buf12
del primals_9
return (buf13, primals_1, primals_2, primals_4, primals_6, primals_8, buf5, buf10, reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 1, 16, 4), 0), reinterpret_tensor(buf6, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf2, (4, 16, 4), (64, 1, 16), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class A2Block(nn.Module):
"""
    Implementation of A2Block (A2-Nets: Double Attention Networks, NIPS 2018).
"""
def __init__(self, inplane, plane):
super(A2Block, self).__init__()
self.down = nn.Conv2d(inplane, plane, 1)
self.up = nn.Conv2d(plane, inplane, 1)
self.gather_down = nn.Conv2d(inplane, plane, 1)
self.distribue_down = nn.Conv2d(inplane, plane, 1)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
res = x
A = self.down(res)
B = self.gather_down(res)
b, c, h, _w = A.size()
A = A.view(b, c, -1)
B = B.view(b, c, -1)
B = self.softmax(B)
B = B.permute(0, 2, 1)
G = torch.bmm(A, B)
C = self.distribue_down(res)
C = C.view(b, c, -1)
C = self.softmax(C)
C = C.permute(0, 2, 1)
atten = torch.bmm(C, G)
atten = atten.permute(0, 2, 1).view(b, c, h, -1)
atten = self.up(atten)
out = res + atten
return out
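# In A2-style double attention, the first bmm (A @ softmax(B).T) gathers a small
# set of global feature descriptors G, and the second bmm (softmax(C).T @ G)
# distributes those descriptors back to every spatial position; the 1x1 "up"
# convolution then restores the inplane channel count before the residual add.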
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplane': 4, 'plane': 4}]
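# Usage sketch (illustrative): the block is shape-preserving, e.g.
#     m = A2Block(inplane=4, plane=4)
#     y = m(torch.rand(4, 4, 4, 4))
#     assert y.shape == (4, 4, 4, 4)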
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
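# The next kernel fuses the 1x1-conv bias add with a numerically stable softmax
# over each length-16 row: it subtracts the row maximum before exponentiating so
# tl_math.exp cannot overflow, then divides by the row sum in the same pass.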
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float('-inf'))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp13 = tmp8 / tmp12
tl.store(out_ptr2 + (r2 + 16 * x3), tmp13, xmask)
@triton.jit
def triton_poi_fused_add_convolution_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp2 = tl.load(in_ptr2 + y0, ymask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf2, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf5 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_per_fused__softmax_1[grid(16)](buf1, primals_5, buf5, 16, 16,
XBLOCK=8, num_warps=2, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 16), (64, 16, 1),
0), reinterpret_tensor(buf5, (4, 16, 4), (64, 1, 16), 0), out=buf6)
buf7 = extern_kernels.convolution(primals_1, primals_6, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf10 = reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0)
del buf1
triton_per_fused__softmax_1[grid(16)](buf7, primals_7, buf10, 16,
16, XBLOCK=8, num_warps=2, num_stages=1)
del primals_7
buf11 = reinterpret_tensor(buf7, (4, 16, 4), (64, 4, 1), 0)
del buf7
extern_kernels.bmm(reinterpret_tensor(buf10, (4, 16, 4), (64, 1, 16
), 0), buf6, out=buf11)
buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (4, 4,
4, 4), (64, 1, 16, 4), 0), primals_8, stride=(1, 1), padding=(0,
0), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 4, 4), (64, 1, 16, 4))
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_2[grid(16, 16)](primals_1, buf12,
primals_9, buf13, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4,
num_stages=1)
del buf12
del primals_9
return (buf13, primals_1, primals_2, primals_4, primals_6, primals_8,
buf5, buf10, reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 1, 16, 4),
0), reinterpret_tensor(buf6, (4, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf2, (4, 16, 4), (64, 1, 16), 0))
class A2BlockNew(nn.Module):
"""
    Implementation of A2Block (A2-Nets: Double Attention Networks, NIPS 2018).
"""
def __init__(self, inplane, plane):
super(A2BlockNew, self).__init__()
self.down = nn.Conv2d(inplane, plane, 1)
self.up = nn.Conv2d(plane, inplane, 1)
self.gather_down = nn.Conv2d(inplane, plane, 1)
self.distribue_down = nn.Conv2d(inplane, plane, 1)
self.softmax = nn.Softmax(dim=-1)
def forward(self, input_0):
primals_2 = self.down.weight
primals_3 = self.down.bias
primals_4 = self.up.weight
primals_5 = self.up.bias
primals_6 = self.gather_down.weight
primals_7 = self.gather_down.bias
primals_8 = self.distribue_down.weight
primals_9 = self.distribue_down.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| zj1008/GALD-DGCNet | A2Block | false | 16,832 | ["MIT"] | 127 | be7ebfe2b3d28ea28a2b4714852999d4af2a785e | https://github.com/zj1008/GALD-DGCNet/tree/be7ebfe2b3d28ea28a2b4714852999d4af2a785e |
BoundedSingleVar | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/5c/c5cxipks62frzpbwdp3p4bgkkwa534gdvmpupuj3gq7ou4vxp3jo.py
# Topologically Sorted Source Nodes: [mul, sigmoid, mul_1, add, add_1], Original ATen: [aten.mul, aten.sigmoid, aten.add]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul => mul
# mul_1 => mul_1
# sigmoid => sigmoid
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_2, 0.0), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_2,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, 0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 4), kwargs = {})
triton_poi_fused_add_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr1 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = 0.0
tmp2 = tmp0 * tmp1
tmp5 = tl.sigmoid(tmp4)
tmp6 = tmp5 * tmp1
tmp7 = tmp2 + tmp6
tmp8 = 4.0
tmp9 = tmp7 + tmp8
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sigmoid, mul_1, add, add_1], Original ATen: [aten.mul, aten.sigmoid, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_0.run(primals_1, primals_2, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
return (buf0, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class BoundedSingleVar(torch.nn.Module):
"""Wrapper a single parameter to represent an unknown coefficient in inverse problem with the upper and lower bound.
:param lower_bound: The lower bound for the parameter.
:type lower_bound: float
:param upper_bound: The upper bound for the parameter.
:type upper_bound: float
"""
def __init__(self, lower_bound, upper_bound):
super().__init__()
self.value = torch.nn.Parameter(torch.Tensor([0.0]))
self.layer = torch.nn.Sigmoid()
self.ub, self.lb = upper_bound, lower_bound
    def forward(self, x) -> torch.Tensor:
        return x[:, :1] * 0.0 + self.layer(self.value) * (self.ub - self.lb) + self.lb
    def get_value(self) -> torch.Tensor:
        return self.layer(self.value) * (self.ub - self.lb) + self.lb
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'lower_bound': 4, 'upper_bound': 4}]
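# Usage sketch (illustrative, with hypothetical bounds): the sigmoid
# reparameterization keeps the trainable value strictly inside (lb, ub):
#     var = BoundedSingleVar(lower_bound=1.0, upper_bound=3.0)
#     var.get_value()   # tensor([2.]) at init, since sigmoid(0) == 0.5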
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
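# The kernel below folds the whole forward expression into one pass. Because the
# traced bounds were lower_bound == upper_bound == 4, (ub - lb) is 0 and the
# output reduces to x[:, :1] * 0 + sigmoid(value) * 0 + 4, i.e. a constant 4
# broadcast over the (4, 1, 4, 4) output.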
@triton.jit
def triton_poi_fused_add_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = 0.0
tmp2 = tmp0 * tmp1
tmp5 = tl.sigmoid(tmp4)
tmp6 = tmp5 * tmp1
tmp7 = tmp2 + tmp6
tmp8 = 4.0
tmp9 = tmp7 + tmp8
tl.store(out_ptr0 + x2, tmp9, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_0[grid(64)](primals_1, primals_2,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
return buf0, primals_2
class BoundedSingleVarNew(torch.nn.Module):
"""Wrapper a single parameter to represent an unknown coefficient in inverse problem with the upper and lower bound.
:param lower_bound: The lower bound for the parameter.
:type lower_bound: float
:param upper_bound: The upper bound for the parameter.
:type upper_bound: float
"""
def __init__(self, lower_bound, upper_bound):
super().__init__()
self.value = torch.nn.Parameter(torch.Tensor([0.0]))
self.layer = torch.nn.Sigmoid()
self.ub, self.lb = upper_bound, lower_bound
    def get_value(self) -> torch.Tensor:
return self.layer(self.value) * (self.ub - self.lb) + self.lb
def forward(self, input_0):
primals_2 = self.value
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| zweien/idrlnet | BoundedSingleVar | false | 16,833 | ["Apache-2.0"] | 66 | 3a19a3301d565c0906aac84ff31eefcff75726a8 | https://github.com/zweien/idrlnet/tree/3a19a3301d565c0906aac84ff31eefcff75726a8 |
FcCat | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ie/ciettq2a3562jfpgfe75iig4ki2hbm6pmbwujlvp6mw26i2odufm.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %view_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_2, buf0, buf1, 512, grid=grid(512), stream=stream0)
del buf0
return (buf1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class FcCat(nn.Module):
def __init__(self, nIn, nOut):
super(FcCat, self).__init__()
self.fc = nn.Linear(nIn, nOut, bias=False)
def forward(self, x):
out = torch.cat((x, self.fc(x)), 1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nIn': 4, 'nOut': 4}]
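# Usage sketch (illustrative): the concatenation doubles the channel dimension,
#     m = FcCat(nIn=4, nOut=4)
#     m(torch.rand(4, 4, 4, 4)).shape   # torch.Size([4, 8, 4, 4])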
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
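# The cat kernel below writes both halves of the channel dimension in a single
# pass: channel indices x1 < 4 copy the original input and x1 >= 4 copy the mm
# output, with tl.where selecting between the two masked loads instead of two
# separate copy kernels.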
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_2, buf0, buf1, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del buf0
return buf1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0)
class FcCatNew(nn.Module):
def __init__(self, nIn, nOut):
super(FcCatNew, self).__init__()
self.fc = nn.Linear(nIn, nOut, bias=False)
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| zwh930712/densenet.pytorch | FcCat | false | 16,834 | ["Apache-2.0"] | 826 | d1cd5e1957975628286e516512c6d1c14430f810 | https://github.com/zwh930712/densenet.pytorch/tree/d1cd5e1957975628286e516512c6d1c14430f810 |
CrossAttentionBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
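# Hedged reference (added for clarity): the kernel above emits, for each
# 4-element row, the two LayerNorm statistics mean and rsqrt(var + 1e-05);
# the affine apply happens in later kernels. Eager sketch of the same pair:
def _layer_norm_stats_ref(x, eps=1e-05):
    mean = x.mean(dim=-1, keepdim=True)
    var = x.var(dim=-1, unbiased=False, keepdim=True)
    return mean, torch.rsqrt(var + eps)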
# kernel path: runs/run_shard_0/inductor_cache/jv/cjvj7wak42bj5fbnlu6ck636s7bvidq3sm5aon3sqruvbkcmikpz.py
# Topologically Sorted Source Nodes: [layer_norm_1, k], Original ATen: [aten.native_layer_norm, aten.add]
# Source node to ATen node mapping:
# k => add_5
# layer_norm_1 => add_2, add_3, mul_2, mul_3, rsqrt_1, sub_1, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_6, [2]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_6, %getitem_3), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_4), kwargs = {})
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_5), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %primals_9), kwargs = {})
triton_poi_fused_add_native_layer_norm_1 = async_compile.triton('triton_poi_fused_add_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + (x2), xmask)
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + (x2), tmp8, xmask)
tl.store(out_ptr1 + (x2), tmp10, xmask)
''', device_str='cuda')
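# Hedged reference: this kernel fuses the affine LayerNorm apply with the
# positional-encoding add on the kv branch, writing both norm(kv) (fed to
# the v projection) and norm(kv) + k_ape (fed to the k projection). Eager
# sketch with illustrative names:
def _norm_plus_ape_ref(kv, mean, rstd, weight, bias, k_ape):
    normed = (kv - mean) * rstd * weight + bias
    return normed, normed + k_ape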
# kernel path: runs/run_shard_0/inductor_cache/j6/cj6aurlygcekohdwc3pqcsluwzbysxccau6ydamvyfq6noqoapwt.py
# Topologically Sorted Source Nodes: [layer_norm, q], Original ATen: [aten.native_layer_norm, aten.add]
# Source node to ATen node mapping:
# layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# q => add_4
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_7), kwargs = {})
triton_poi_fused_add_native_layer_norm_2 = async_compile.triton('triton_poi_fused_add_native_layer_norm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + (x2), xmask)
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/xt/cxtkkmujo4ytg6ycpz5lk5livtstr63pg5nsf5ijewjbtrfrqx6k.py
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
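# Hedged reference: the clone materializes the (B, L, H, D) -> (B, H, L, D)
# head transpose contiguously so the subsequent bmm sees dense operands.
# Eager sketch:
def _split_heads_ref(x, num_heads):
    B, L, C = x.shape
    return x.reshape(B, L, num_heads, C // num_heads).permute(0, 2, 1, 3).contiguous()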
# kernel path: runs/run_shard_0/inductor_cache/6o/c6oubce6opr4zk3zweskejxawnbhom76y6okst3ysdsgziawllfn.py
# Topologically Sorted Source Nodes: [attn_1, attn_2, attn_3], Original ATen: [aten.mul, aten.add, aten._softmax]
# Source node to ATen node mapping:
# attn_1 => mul_4
# attn_2 => add_6
# attn_3 => amax, exp, sub_2, sum_1
# Graph fragment:
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 1.0), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_12), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_6, [-1], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_6, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_add_mul_4 = async_compile.triton('triton_poi_fused__softmax_add_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_mul_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp5 * tmp1
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp11 = tmp10 * tmp1
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 * tmp1
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp8 - tmp19
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp19
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp18 - tmp19
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tl.store(out_ptr0 + (x2), tmp19, xmask)
tl.store(out_ptr1 + (x2), tmp30, xmask)
''', device_str='cuda')
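# Hedged reference: softmax is split across two kernels; this first pass
# produces the row max and the sum of exp(x - max), the standard trick that
# keeps exp() in range. Eager sketch covering both passes:
def _scaled_softmax_ref(attn, attn_pos, scale=1.0):
    attn = attn * scale + attn_pos
    m = attn.amax(dim=-1, keepdim=True)
    e = torch.exp(attn - m)
    return e / e.sum(dim=-1, keepdim=True)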
# kernel path: runs/run_shard_0/inductor_cache/yu/cyujjdajffibfckrsrgjejqwkfk3tlaooegvtewwm747fyejqtxq.py
# Topologically Sorted Source Nodes: [attn_1, attn_2, attn_3], Original ATen: [aten.mul, aten.add, aten._softmax]
# Source node to ATen node mapping:
# attn_1 => mul_4
# attn_2 => add_6
# attn_3 => amax, div, exp, sub_2
# Graph fragment:
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 1.0), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_12), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_6, [-1], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_6, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_add_mul_5 = async_compile.triton('triton_poi_fused__softmax_add_mul_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_mul_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_mul_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x5 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/py/cpyvyuh4nptcbfj562tz3svitnubcs7ve2plukym7ogrnohcl6an.py
# Topologically Sorted Source Nodes: [x_2, q_2, layer_norm_2], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_2 => var_mean_2
# q_2 => add_8
# x_2 => add_7
# Graph fragment:
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_14), kwargs = {})
# %add_8 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_7), kwargs = {})
# %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_8, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (1))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (2))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (3))
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
tl.store(out_ptr1 + (x0), tmp40, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rd/crdlrdvbru4nlobctm3cvw7qf3jvm3pl7iulggydowoqcby3wcbp.py
# Topologically Sorted Source Nodes: [x_2, q_2, layer_norm_2], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_2 => add_10, add_9, mul_5, mul_6, rsqrt_2, sub_3
# q_2 => add_8
# x_2 => add_7
# Graph fragment:
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_14), kwargs = {})
# %add_8 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_7), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {})
# %rsqrt_2 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_9,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_8, %getitem_5), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %rsqrt_2), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %primals_15), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %primals_16), kwargs = {})
triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/og/cogzoasids5wueasyz2ghot5xek7h4i7xadaeptfhjgyduz3qkc3.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# x_5 => add_11, erf, mul_7, mul_8, mul_9
# Graph fragment:
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_19, 0.5), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_19, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_8,), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_7, %add_11), kwargs = {})
triton_poi_fused_gelu_8 = async_compile.triton('triton_poi_fused_gelu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
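# Hedged reference: exact (erf-based) GELU, 0.5 * x * (1 + erf(x / sqrt(2))),
# i.e. torch.nn.functional.gelu with the default approximate='none':
def _gelu_ref(x):
    return 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))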
# kernel path: runs/run_shard_0/inductor_cache/4e/c4eeufkdwzwmlxzlmefvshxqlxn2bg33die3l6sanygikx7amrcp.py
# Topologically Sorted Source Nodes: [x_2, q_2, q_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# q_2 => add_8
# q_3 => add_12
# x_2 => add_7
# Graph fragment:
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_14), kwargs = {})
# %add_8 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_7), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_8, %view_21), kwargs = {})
triton_poi_fused_add_9 = async_compile.triton('triton_poi_fused_add_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + (x2), xmask)
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
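# Hedged reference: add_9 folds both residual connections of the block into
# one pass, q_out = q_in + (attn_out + proj_bias) + (mlp_out + fc2_bias):
def _double_residual_ref(q_in, attn_out, proj_bias, mlp_out, fc2_bias):
    return q_in + (attn_out + proj_bias) + (mlp_out + fc2_bias)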
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (4, ), (1, ))
assert_size_stride(primals_16, (4, ), (1, ))
assert_size_stride(primals_17, (16, 4), (4, 1))
assert_size_stride(primals_18, (16, ), (1, ))
assert_size_stride(primals_19, (4, 16), (16, 1))
assert_size_stride(primals_20, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_0.run(primals_6, buf2, buf3, 16, grid=grid(16), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm_1, k], Original ATen: [aten.native_layer_norm, aten.add]
triton_poi_fused_add_native_layer_norm_1.run(primals_6, buf2, buf3, primals_4, primals_5, primals_9, buf4, buf7, 64, grid=grid(64), stream=stream0)
del buf2
del buf3
del primals_4
del primals_5
del primals_9
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm, q], Original ATen: [aten.native_layer_norm, aten.add]
triton_poi_fused_add_native_layer_norm_2.run(primals_3, buf0, buf1, primals_1, primals_2, primals_7, buf5, 64, grid=grid(64), stream=stream0)
del primals_1
del primals_2
del primals_7
buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf5, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf6)
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf8)
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf6, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
buf11 = reinterpret_tensor(buf6, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf8, buf11, 16, 4, grid=grid(16, 4), stream=stream0)
buf12 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf11, (16, 1, 4), (4, 0, 1), 0), out=buf12)
buf13 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf8 # reuse
buf14 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [attn_1, attn_2, attn_3], Original ATen: [aten.mul, aten.add, aten._softmax]
triton_poi_fused__softmax_add_mul_4.run(buf12, primals_12, buf13, buf14, 64, grid=grid(64), stream=stream0)
buf15 = reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [attn_1, attn_2, attn_3], Original ATen: [aten.mul, aten.add, aten._softmax]
triton_poi_fused__softmax_add_mul_5.run(buf15, primals_12, buf13, buf14, 256, grid=grid(256), stream=stream0)
del primals_12
buf16 = reinterpret_tensor(buf14, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf14 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf9, buf16, 16, 4, grid=grid(16, 4), stream=stream0)
buf17 = reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf15, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf16, (16, 4, 1), (4, 1, 0), 0), out=buf17)
buf18 = reinterpret_tensor(buf13, (4, 4, 4), (16, 4, 1), 0); del buf13 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf17, buf18, 16, 4, grid=grid(16, 4), stream=stream0)
buf19 = reinterpret_tensor(buf17, (16, 4), (4, 1), 0); del buf17 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf19)
buf20 = buf1; del buf1 # reuse
buf21 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_2, q_2, layer_norm_2], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_6.run(primals_3, buf19, primals_14, buf20, buf21, 16, grid=grid(16), stream=stream0)
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2, q_2, layer_norm_2], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_7.run(primals_3, buf19, primals_14, buf20, buf21, primals_15, primals_16, buf22, 64, grid=grid(64), stream=stream0)
del buf20
del buf21
del primals_16
buf23 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_18, reinterpret_tensor(buf22, (16, 4), (4, 1), 0), reinterpret_tensor(primals_17, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf23)
del primals_18
buf24 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.gelu]
triton_poi_fused_gelu_8.run(buf23, buf24, 256, grid=grid(256), stream=stream0)
buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf24, (16, 16), (16, 1), 0), reinterpret_tensor(primals_19, (16, 4), (1, 16), 0), out=buf25)
buf26 = reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0); del buf25 # reuse
# Topologically Sorted Source Nodes: [x_2, q_2, q_3], Original ATen: [aten.add]
triton_poi_fused_add_9.run(buf26, primals_3, buf19, primals_14, primals_20, 64, grid=grid(64), stream=stream0)
del primals_20
return (buf26, primals_3, primals_6, primals_14, primals_15, reinterpret_tensor(buf5, (16, 4), (4, 1), 0), reinterpret_tensor(buf7, (16, 4), (4, 1), 0), reinterpret_tensor(buf4, (16, 4), (4, 1), 0), buf15, reinterpret_tensor(buf18, (16, 4), (4, 1), 0), buf19, reinterpret_tensor(buf22, (16, 4), (4, 1), 0), buf23, reinterpret_tensor(buf24, (16, 16), (16, 1), 0), primals_19, primals_17, primals_13, reinterpret_tensor(buf16, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 4), 0), primals_11, primals_10, primals_8, )
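# Hedged note on call()'s argument layout (inferred from shapes and use
# sites): primals_1/2 ~ norm1_q.{weight,bias}; primals_3 ~ q; primals_4/5 ~
# norm1_kv.{weight,bias}; primals_6 ~ kv; primals_7 ~ q_ape; primals_9 ~
# k_ape; primals_8/10/11 ~ q/k/v projection weights; primals_12 ~ attn_pos;
# primals_13/14 ~ proj.{weight,bias}; primals_15/16 ~ norm2.{weight,bias};
# primals_17/18 ~ mlp.fc1.{weight,bias}; primals_19/20 ~ mlp.fc2.{weight,bias}.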
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.distributed
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
class Mlp(nn.Module):
""" Multilayer perceptron."""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
"""
Args:
x (torch.Tensor): (B, L, C), input tensor
Returns:
torch.Tensor: (B, L, C), output tensor
"""
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class CrossAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
super(CrossAttention, self).__init__()
assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.'
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
if attn_pos_encoding_only:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.kv = nn.Linear(dim, 2 * dim, bias=qkv_bias)
else:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_pos_encoding_only = attn_pos_encoding_only
def forward(self, q, kv, q_ape, k_ape, attn_pos):
"""
Args:
q (torch.Tensor): (B, L_q, C)
kv (torch.Tensor): (B, L_kv, C)
q_ape (torch.Tensor | None): (1 or B, L_q, C), absolute positional encoding for q
k_ape (torch.Tensor | None): (1 or B, L_kv, C), absolute positional encoding for k
attn_pos (torch.Tensor | None): (1 or B, num_heads, L_q, L_kv), untied positional encoding
Returns:
torch.Tensor: (B, L_q, C)
"""
B, q_N, C = q.shape
kv_N = kv.shape[1]
if self.attn_pos_encoding_only:
assert q_ape is None and k_ape is None
            q = self.q(q).reshape(B, q_N, self.num_heads,
                C // self.num_heads).permute(0, 2, 1, 3)
            kv = self.kv(kv).reshape(B, kv_N, 2, self.num_heads,
                C // self.num_heads).permute(2, 0, 3, 1, 4)
            k, v = kv[0], kv[1]
        else:
            q = q + q_ape if q_ape is not None else q
            q = self.q(q).reshape(B, q_N, self.num_heads,
                C // self.num_heads).permute(0, 2, 1, 3)
            k = kv + k_ape if k_ape is not None else kv
            k = self.k(k).reshape(B, -1, self.num_heads,
                C // self.num_heads).permute(0, 2, 1, 3)
            v = self.v(kv).reshape(B, -1, self.num_heads,
                C // self.num_heads).permute(0, 2, 1, 3)
attn = q @ k.transpose(-2, -1)
attn = attn * self.scale
if attn_pos is not None:
attn = attn + attn_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, q_N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
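# Hedged usage sketch (illustrative, not part of the original source):
def _cross_attention_demo():
    # Mirrors get_inputs() below; tensors are random and the (4, 4, 4)
    # attn_pos broadcasts against the (4, 4, 4, 4) attention map.
    m = CrossAttention(dim=4, num_heads=4)
    q = kv = q_ape = k_ape = attn_pos = torch.rand(4, 4, 4)
    return m(q, kv, q_ape, k_ape, attn_pos)  # shape (4, 4, 4)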
class CrossAttentionBlock(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
        qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=nn.Identity(),
        act_layer=nn.GELU, norm_layer=nn.LayerNorm,
        attn_pos_encoding_only=False):
super(CrossAttentionBlock, self).__init__()
self.norm1_q = norm_layer(dim)
self.norm1_kv = norm_layer(dim)
self.attn = CrossAttention(dim, num_heads, qkv_bias, qk_scale,
attn_drop, drop, attn_pos_encoding_only)
self.drop_path = drop_path
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
def forward(self, q, kv, q_ape, k_ape, attn_pos):
"""
Args:
q (torch.Tensor): (B, L_q, C)
kv (torch.Tensor): (B, L_kv, C)
q_ape (torch.Tensor | None): (1 or B, L_q, C), absolute positional encoding for q
k_ape (torch.Tensor | None): (1 or B, L_kv, C), absolute positional encoding for k
attn_pos (torch.Tensor | None): (1 or B, num_heads, L_q, L_kv), untied positional encoding
Returns:
torch.Tensor: (B, L_q, C)
"""
q = q + self.drop_path(self.attn(self.norm1_q(q), self.norm1_kv(kv),
q_ape, k_ape, attn_pos))
q = q + self.drop_path(self.mlp(self.norm2(q)))
return q
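# Hedged usage sketch: the block is pre-norm, so each sub-layer reads a
# LayerNorm'd input and its output is added back onto q through the
# residual path.
def _cross_attention_block_demo():
    block = CrossAttentionBlock(dim=4, num_heads=4)
    q = kv = ape = pos = torch.rand(4, 4, 4)
    return block(q, kv, ape, ape, pos)  # shape (4, 4, 4)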
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]),
        torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'num_heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.distributed
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
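# Hedged worked example for the statistics above: for a row x = [1, 2, 3, 4],
# mean = 2.5, biased variance = 1.25, and rsqrt(1.25 + 1e-05) is roughly
# 0.8944, so the normalized row is about [-1.342, -0.447, 0.447, 1.342]
# before the affine weight and bias are applied.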
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + x2, xmask)
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + x2, xmask)
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_add_mul_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp5 * tmp1
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp11 = tmp10 * tmp1
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 * tmp1
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp8 - tmp19
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp19
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp18 - tmp19
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tl.store(out_ptr0 + x2, tmp19, xmask)
tl.store(out_ptr1 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused__softmax_add_mul_5(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x5 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr2 + 2)
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr2 + 3)
    tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
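# Reading note: taken together, triton_poi_fused_add_native_layer_norm_6 accumulates
# the mean (sum / 4) and the biased variance of the residual sum x + proj(attn) over
# the last dimension, and triton_poi_fused_add_native_layer_norm_7 applies
# y = (x - mean) * rsqrt(var + 1e-05) * weight + bias -- the usual two-pass LayerNorm.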
@triton.jit
def triton_poi_fused_gelu_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
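# For orientation only (a hedged sketch, not part of the captured source): the
# kernel above evaluates the exact GELU, 0.5 * x * (1 + erf(x / sqrt(2))), where
# 0.7071067811865476 = 1/sqrt(2). An equivalent eager-mode reference would be:
def _gelu_reference(x):
    """Hypothetical helper mirroring triton_poi_fused_gelu_8 element-wise."""
    return 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))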
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (16, 4), (4, 1))
assert_size_stride(primals_18, (16,), (1,))
assert_size_stride(primals_19, (4, 16), (16, 1))
assert_size_stride(primals_20, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_6, buf2,
buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_1[grid(64)](primals_6, buf2,
            buf3, primals_4, primals_5, primals_9, buf4, buf7, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
del buf2
del buf3
del primals_4
del primals_5
del primals_9
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_2[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, primals_7, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
del primals_2
del primals_7
buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf6)
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf8)
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(16, 4)](buf6, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf6, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf6
triton_poi_fused_clone_3[grid(16, 4)](buf8, buf11, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf11, (16, 1, 4), (4, 0, 1), 0), out=buf12)
buf13 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf8
buf14 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_mul_4[grid(64)](buf12, primals_12,
buf13, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf15 = reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf12
triton_poi_fused__softmax_add_mul_5[grid(256)](buf15, primals_12,
buf13, buf14, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_12
buf16 = reinterpret_tensor(buf14, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf14
triton_poi_fused_clone_3[grid(16, 4)](buf9, buf16, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf17 = reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 1), 0)
del buf9
extern_kernels.bmm(reinterpret_tensor(buf15, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf16, (16, 4, 1), (4, 1, 0), 0), out=buf17)
buf18 = reinterpret_tensor(buf13, (4, 4, 4), (16, 4, 1), 0)
del buf13
triton_poi_fused_clone_3[grid(16, 4)](buf17, buf18, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf19 = reinterpret_tensor(buf17, (16, 4), (4, 1), 0)
del buf17
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf19)
buf20 = buf1
del buf1
buf21 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_6[grid(16)](primals_3, buf19,
primals_14, buf20, buf21, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_7[grid(64)](primals_3, buf19,
primals_14, buf20, buf21, primals_15, primals_16, buf22, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf20
del buf21
del primals_16
buf23 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_18, reinterpret_tensor(buf22, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_17, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf23)
del primals_18
buf24 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_gelu_8[grid(256)](buf23, buf24, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf24, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_19, (16, 4), (1, 16), 0), out=buf25)
buf26 = reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0)
del buf25
triton_poi_fused_add_9[grid(64)](buf26, primals_3, buf19,
primals_14, primals_20, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_20
    return (buf26, primals_3, primals_6, primals_14, primals_15,
        reinterpret_tensor(buf5, (16, 4), (4, 1), 0),
        reinterpret_tensor(buf7, (16, 4), (4, 1), 0),
        reinterpret_tensor(buf4, (16, 4), (4, 1), 0), buf15,
        reinterpret_tensor(buf18, (16, 4), (4, 1), 0), buf19,
        reinterpret_tensor(buf22, (16, 4), (4, 1), 0), buf23,
        reinterpret_tensor(buf24, (16, 16), (16, 1), 0),
        primals_19, primals_17, primals_13,
        reinterpret_tensor(buf16, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 4), 0),
        primals_11, primals_10, primals_8)
class Mlp(nn.Module):
""" Multilayer perceptron."""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
"""
Args:
x (torch.Tensor): (B, L, C), input tensor
Returns:
torch.Tensor: (B, L, C), output tensor
"""
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
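# Hedged usage sketch (illustrative values, not from the original repo):
def _mlp_usage_sketch():
    """Mlp keeps the channel shape, mapping (B, L, C) -> (B, L, C)
    via fc1 -> act -> drop -> fc2 -> drop."""
    mlp = Mlp(in_features=4, hidden_features=16)
    return mlp(torch.rand(2, 8, 4))  # -> shape (2, 8, 4)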
class CrossAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
super(CrossAttention, self).__init__()
        assert dim % num_heads == 0, f'dim {dim} should be divisible by num_heads {num_heads}.'
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
if attn_pos_encoding_only:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.kv = nn.Linear(dim, 2 * dim, bias=qkv_bias)
else:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_pos_encoding_only = attn_pos_encoding_only
def forward(self, q, kv, q_ape, k_ape, attn_pos):
"""
Args:
q (torch.Tensor): (B, L_q, C)
kv (torch.Tensor): (B, L_kv, C)
q_ape (torch.Tensor | None): (1 or B, L_q, C), absolute positional encoding for q
k_ape (torch.Tensor | None): (1 or B, L_kv, C), absolute positional encoding for k
attn_pos (torch.Tensor | None): (1 or B, num_heads, L_q, L_kv), untied positional encoding
Returns:
torch.Tensor: (B, L_q, C)
"""
B, q_N, C = q.shape
kv_N = kv.shape[1]
if self.attn_pos_encoding_only:
assert q_ape is None and k_ape is None
            q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
            kv = self.kv(kv).reshape(B, kv_N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
else:
q = q + q_ape if q_ape is not None else q
            q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
            k = kv + k_ape if k_ape is not None else kv
            k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
            v = self.v(kv).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
attn = q @ k.transpose(-2, -1)
attn = attn * self.scale
if attn_pos is not None:
attn = attn + attn_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, q_N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
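# Hedged usage sketch (illustrative values, not from the original repo):
def _cross_attention_usage_sketch():
    """CrossAttention maps q:(B, L_q, C) against kv:(B, L_kv, C) to (B, L_q, C);
    the three positional arguments may all be None."""
    attn = CrossAttention(dim=4, num_heads=2)
    q, kv = torch.rand(2, 5, 4), torch.rand(2, 7, 4)
    return attn(q, kv, None, None, None)  # -> shape (2, 5, 4)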
class CrossAttentionBlockNew(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=nn.Identity(),
        act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_pos_encoding_only=False):
super(CrossAttentionBlockNew, self).__init__()
self.norm1_q = norm_layer(dim)
self.norm1_kv = norm_layer(dim)
self.attn = CrossAttention(dim, num_heads, qkv_bias, qk_scale,
attn_drop, drop, attn_pos_encoding_only)
self.drop_path = drop_path
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
def forward(self, input_0, input_1, input_2, input_3, input_4):
primals_1 = self.norm1_q.weight
primals_2 = self.norm1_q.bias
primals_4 = self.norm1_kv.weight
primals_5 = self.norm1_kv.bias
primals_8 = self.attn.q.weight
primals_10 = self.attn.k.weight
primals_11 = self.attn.v.weight
primals_13 = self.attn.proj.weight
primals_14 = self.attn.proj.bias
primals_15 = self.norm2.weight
primals_16 = self.norm2.bias
primals_17 = self.mlp.fc1.weight
primals_18 = self.mlp.fc1.bias
primals_19 = self.mlp.fc2.weight
primals_20 = self.mlp.fc2.bias
primals_3 = input_0
primals_6 = input_1
primals_7 = input_2
primals_9 = input_3
primals_12 = input_4
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20])
return output[0]
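# Hedged sketch (an assumption, not captured source): drives the compiled graph
# with the exact tensor shapes asserted in call() -- five (4, 4, 4) CUDA inputs,
# dim=4, num_heads=4, so mlp_hidden_dim = 16 matches primals_17/primals_18.
def _cross_attention_block_usage_sketch():
    block = CrossAttentionBlockNew(dim=4, num_heads=4).cuda()
    inputs = [torch.rand(4, 4, 4, device='cuda') for _ in range(5)]
    return block(*inputs)  # -> shape (4, 4, 4)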
| zhangzhengde0225/SwinTrack | CrossAttentionBlock | false | 16,835 | ["MIT"] | 143 | 526be17f8ef266cb924c6939bd8dda23e9b73249 | https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249 |
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/es/cesrwuwkeluuf67vbj367seoctnhxwytz3gxjpe3dmncvzr46phg.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %mm], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (8*x1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/d2/cd2iuy4xijl6opmrfcyh5n4ktkq3vxdew7ukyggi4oe4nmubofvx.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %mm_1], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
x1 = (xindex // 8)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (12*x1)), tmp0, xmask)
''', device_str='cuda')
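# Reading note: torch.cat never materializes a separate copy here. Inductor
# allocates the full concatenated buffer up front and lets each producer write
# directly into an aliased slice of it (see the reinterpret_tensor(..., offset)
# aliases in call() below), so the two cat kernels reduce to plain strided stores.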
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf0 = reinterpret_tensor(buf2, (4, 4), (8, 1), 4) # alias
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf2, (4, 4), (8, 1), 0) # alias
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_2, buf1, 16, grid=grid(16), stream=stream0)
buf5 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
buf3 = reinterpret_tensor(buf5, (4, 4), (12, 1), 8) # alias
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(buf2, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf3)
buf4 = reinterpret_tensor(buf5, (4, 8), (12, 1), 0) # alias
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf2, buf4, 32, grid=grid(32), stream=stream0)
return (buf5, primals_2, buf2, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class FcCat(nn.Module):
def __init__(self, nIn, nOut):
super(FcCat, self).__init__()
self.fc = nn.Linear(nIn, nOut, bias=False)
def forward(self, x):
out = torch.cat((x, self.fc(x)), 1)
return out
class Net(nn.Module):
def __init__(self, nFeatures, nHidden1, nHidden2):
super(Net, self).__init__()
self.l1 = FcCat(nFeatures, nHidden1)
self.l2 = FcCat(nFeatures + nHidden1, nHidden2)
def forward(self, x):
out = self.l1(x)
out = self.l2(out)
return out
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'nFeatures': 4, 'nHidden1': 4, 'nHidden2': 4}]
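# Hedged usage sketch (not from the original repo):
def _net_usage_sketch():
    """Each FcCat widens its input by concatenation, so with the init args above
    Net maps (N, 4) -> (N, 4 + 4) -> (N, 8 + 4) = (N, 12)."""
    net = Net(nFeatures=4, nHidden1=4, nHidden2=4)
    return net(torch.rand(4, 4))  # -> shape (4, 12)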
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 8 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
x1 = xindex // 8
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 12 * x1), tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf0 = reinterpret_tensor(buf2, (4, 4), (8, 1), 4)
extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4),
(1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf2, (4, 4), (8, 1), 0)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(16)](primals_2, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
buf3 = reinterpret_tensor(buf5, (4, 4), (12, 1), 8)
        extern_kernels.mm(buf2, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf3)
buf4 = reinterpret_tensor(buf5, (4, 8), (12, 1), 0)
triton_poi_fused_cat_1[grid(32)](buf2, buf4, 32, XBLOCK=32,
num_warps=1, num_stages=1)
return buf5, primals_2, buf2, primals_3
class FcCat(nn.Module):
def __init__(self, nIn, nOut):
super(FcCat, self).__init__()
self.fc = nn.Linear(nIn, nOut, bias=False)
def forward(self, x):
out = torch.cat((x, self.fc(x)), 1)
return out
class NetNew(nn.Module):
def __init__(self, nFeatures, nHidden1, nHidden2):
super(NetNew, self).__init__()
self.l1 = FcCat(nFeatures, nHidden1)
self.l2 = FcCat(nFeatures + nHidden1, nHidden2)
def forward(self, input_0):
primals_1 = self.l1.fc.weight
primals_3 = self.l2.fc.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| zwh930712/densenet.pytorch | Net | false | 16,836 | ["Apache-2.0"] | 826 | d1cd5e1957975628286e516512c6d1c14430f810 | https://github.com/zwh930712/densenet.pytorch/tree/d1cd5e1957975628286e516512c6d1c14430f810 |
SpatialSoftmaxBZ | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/rq/crqom5wr56ilbnvvekic54ftgpfauuywdgkv4ufa7zpsh2g4oazw.py
# Topologically Sorted Source Nodes: [softmax, mul, expected_x, mul_1, expected_y, expected_xy], Original ATen: [aten._softmax, aten.mul, aten.sum, aten.stack]
# Source node to ATen node mapping:
# expected_x => sum_2
# expected_xy => cat
# expected_y => sum_3
# mul => mul
# mul_1 => mul_1
# softmax => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %div), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %div), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [-1]), kwargs = {})
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1], 2), kwargs = {})
triton_per_fused__softmax_mul_stack_sum_0 = async_compile.triton('triton_per_fused__softmax_mul_stack_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_mul_stack_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_mul_stack_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr4, out_ptr5, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp11 = tl.load(in_ptr1 + (r1), None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr2 + (r1), None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp12 = tmp6 / tmp10
tmp13 = tmp11 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp19 = tmp18 * tmp12
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.where(xmask, tmp20, 0)
tmp23 = tl.sum(tmp22, 1)[:, None]
tmp24 = -tmp17
tmp25 = 1.0
tmp26 = tmp24 + tmp25
tmp27 = 0.5
tmp28 = tmp26 * tmp27
tl.store(out_ptr4 + (2*x0), tmp28, xmask)
tl.store(out_ptr5 + (2*x0), tmp23, xmask)
''', device_str='cuda')
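# Reading note: this single persistent-reduction kernel fuses the whole forward
# pass. For each (batch, channel) row of 16 pixels it computes the numerically
# stable softmax, both position-weighted expectations, and the (-x + 1) / 2 remap
# of the first coordinate, writing the results straight into the stacked output.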
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (16, ), (1, ))
assert_size_stride(arg2_1, (16, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf6 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
buf4 = reinterpret_tensor(buf6, (4, 4, 1), (8, 2, 1), 0) # alias
buf5 = reinterpret_tensor(buf6, (4, 4, 1), (8, 2, 1), 1) # alias
# Topologically Sorted Source Nodes: [softmax, mul, expected_x, mul_1, expected_y, expected_xy], Original ATen: [aten._softmax, aten.mul, aten.sum, aten.stack]
stream0 = get_raw_stream(0)
triton_per_fused__softmax_mul_stack_sum_0.run(arg0_1, arg1_1, arg2_1, buf4, buf5, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn.functional as F
class SpatialSoftmaxBZ(torch.nn.Module):
"""
IMPORTANT:
i in [0, 1], where 0 is at the bottom, 1 is at the top
j in [-1, 1]
"""
def __init__(self, height, width):
super().__init__()
self.height = height
self.width = width
        pos_x, pos_y = np.meshgrid(np.linspace(-1.0, 1.0, self.height), np.linspace(-1.0, 1.0, self.width))
self.pos_x = torch.from_numpy(pos_x).reshape(-1).float()
self.pos_x = torch.nn.Parameter(self.pos_x, requires_grad=False)
self.pos_y = torch.from_numpy(pos_y).reshape(-1).float()
self.pos_y = torch.nn.Parameter(self.pos_y, requires_grad=False)
def forward(self, feature):
flattened = feature.view(feature.shape[0], feature.shape[1], -1)
softmax = F.softmax(flattened, dim=-1)
expected_x = torch.sum(self.pos_y * softmax, dim=-1)
expected_x = (-expected_x + 1) / 2.0
expected_y = torch.sum(self.pos_x * softmax, dim=-1)
expected_xy = torch.stack([expected_x, expected_y], dim=2)
return expected_xy
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'height': 4, 'width': 4}]
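# Hedged usage sketch (not from the original repo):
def _spatial_softmax_usage_sketch():
    """The layer flattens each (H, W) map, softmaxes it, and returns per-channel
    expected coordinates, so (B, C, H, W) -> (B, C, 2)."""
    layer = SpatialSoftmaxBZ(height=4, width=4)
    return layer(torch.rand(4, 4, 4, 4))  # -> shape (4, 4, 2)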
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__softmax_mul_stack_sum_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr4, out_ptr5, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp11 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp12 = tmp6 / tmp10
tmp13 = tmp11 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp19 = tmp18 * tmp12
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.where(xmask, tmp20, 0)
tmp23 = tl.sum(tmp22, 1)[:, None]
tmp24 = -tmp17
tmp25 = 1.0
tmp26 = tmp24 + tmp25
tmp27 = 0.5
tmp28 = tmp26 * tmp27
tl.store(out_ptr4 + 2 * x0, tmp28, xmask)
tl.store(out_ptr5 + 2 * x0, tmp23, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (16,), (1,))
assert_size_stride(arg2_1, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf6 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
buf4 = reinterpret_tensor(buf6, (4, 4, 1), (8, 2, 1), 0)
buf5 = reinterpret_tensor(buf6, (4, 4, 1), (8, 2, 1), 1)
get_raw_stream(0)
triton_per_fused__softmax_mul_stack_sum_0[grid(16)](arg0_1, arg1_1,
arg2_1, buf4, buf5, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf6,
class SpatialSoftmaxBZNew(torch.nn.Module):
"""
IMPORTANT:
i in [0, 1], where 0 is at the bottom, 1 is at the top
j in [-1, 1]
"""
def __init__(self, height, width):
super().__init__()
self.height = height
self.width = width
        pos_x, pos_y = np.meshgrid(np.linspace(-1.0, 1.0, self.height), np.linspace(-1.0, 1.0, self.width))
self.pos_x = torch.from_numpy(pos_x).reshape(-1).float()
self.pos_x = torch.nn.Parameter(self.pos_x, requires_grad=False)
self.pos_y = torch.from_numpy(pos_y).reshape(-1).float()
self.pos_y = torch.nn.Parameter(self.pos_y, requires_grad=False)
def forward(self, input_0):
arg1_1 = self.pos_x
arg2_1 = self.pos_y
arg0_1 = input_0
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| zwc662/SequentialAttack | SpatialSoftmaxBZ | false | 16,837 | ["MIT"] | 116 | 677b19c51ea76d794939ee126fccd75ffa0e6fe6 | https://github.com/zwc662/SequentialAttack/tree/677b19c51ea76d794939ee126fccd75ffa0e6fe6 |
AttentionLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ij/cijdb53bbr7xmchvgwsaycilukstpjt4i37au76j36liczd74ygg.py
# Topologically Sorted Source Nodes: [mul, attn_scores, softmax], Original ATen: [aten.mul, aten.sum, aten._softmax]
# Source node to ATen node mapping:
# attn_scores => sum_1
# mul => mul
# softmax => amax, div, exp, sub, sum_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %unsqueeze), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2]), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%permute_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {})
triton_poi_fused__softmax_mul_sum_0 = async_compile.triton('triton_poi_fused__softmax_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = tmp14 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp16 / tmp16
tl.store(in_out_ptr0 + (x0), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4x/c4xlvyf5ygu6k4l3eqhefyv22mq43o46tinzhvos5q2juudono6t.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%sum_3, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 * tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 8, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + ((4*x1) + ((-4) + x0)), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + (x2), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ck/cckhwikstbb4m4psyrplb7xzmtkjdnu3g55dkjthnlsrk4ij5vpw.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# x_2 => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mm_1,), kwargs = {})
triton_poi_fused_tanh_2 = async_compile.triton('triton_poi_fused_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 1), (1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [mul, attn_scores, softmax], Original ATen: [aten.mul, aten.sum, aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_mul_sum_0.run(buf2, primals_3, buf0, 4, grid=grid(4), stream=stream0)
buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf2, primals_3, primals_2, buf3, 32, grid=grid(32), stream=stream0)
buf4 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(buf3, reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.tanh]
triton_poi_fused_tanh_2.run(buf5, 16, grid=grid(16), stream=stream0)
return (buf5, reinterpret_tensor(buf2, (1, 4), (1, 1), 0), primals_2, primals_3, buf2, buf3, buf5, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.utils.data
import torch.distributed
import torch.nn as nn
import torch.optim
import torch.optim.lr_scheduler
def Linear(in_features, out_features, bias=True, dropout=0):
"""Weight-normalized Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
m.weight.data.uniform_(-0.1, 0.1)
if bias:
m.bias.data.uniform_(-0.1, 0.1)
return m
class AttentionLayer(nn.Module):
def __init__(self, input_embed_dim, output_embed_dim):
super().__init__()
self.input_proj = Linear(input_embed_dim, output_embed_dim, bias=False)
self.output_proj = Linear(2 * output_embed_dim, output_embed_dim,
bias=False)
def forward(self, input, source_hids):
x = self.input_proj(input)
attn_scores = (source_hids * x.unsqueeze(0)).sum(dim=2)
attn_scores = F.softmax(attn_scores.t(), dim=1).t()
x = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0)
x = F.tanh(self.output_proj(torch.cat((x, input), dim=1)))
return x, attn_scores
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_embed_dim': 4, 'output_embed_dim': 4}]
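# Hedged usage sketch mirroring get_inputs() above (not from the original repo):
def _attention_layer_usage_sketch():
    """With input (B, C) and source_hids of matching shape, the layer returns
    the tanh-projected context (B, C) together with the attention scores."""
    layer = AttentionLayer(input_embed_dim=4, output_embed_dim=4)
    x, attn_scores = layer(torch.rand(4, 4), torch.rand(4, 4))
    return x, attn_scores  # x.shape == (4, 4)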
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.utils.data
import torch.distributed
import torch.nn as nn
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = tmp14 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp16 / tmp16
tl.store(in_out_ptr0 + x0, tmp17, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
    tmp6 = tl.load(in_ptr1 + (4 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 * tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp13 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x2, tmp14, xmask)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 1), (1, 1), 0)
del buf1
get_raw_stream(0)
triton_poi_fused__softmax_mul_sum_0[grid(4)](buf2, primals_3, buf0,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_cat_1[grid(32)](buf2, primals_3, primals_2, buf3,
32, XBLOCK=32, num_warps=1, num_stages=1)
buf4 = buf0
del buf0
        extern_kernels.mm(buf3, reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_tanh_2[grid(16)](buf5, 16, XBLOCK=16, num_warps=1,
num_stages=1)
    return buf5, reinterpret_tensor(buf2, (1, 4), (1, 1), 0), primals_2, primals_3, buf2, buf3, buf5, primals_4
def Linear(in_features, out_features, bias=True, dropout=0):
"""Weight-normalized Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
m.weight.data.uniform_(-0.1, 0.1)
if bias:
m.bias.data.uniform_(-0.1, 0.1)
return m
class AttentionLayerNew(nn.Module):
def __init__(self, input_embed_dim, output_embed_dim):
super().__init__()
self.input_proj = Linear(input_embed_dim, output_embed_dim, bias=False)
self.output_proj = Linear(2 * output_embed_dim, output_embed_dim,
bias=False)
def forward(self, input_0, input_1):
primals_1 = self.input_proj.weight
primals_4 = self.output_proj.weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1]
| zsquaredz/XSum | AttentionLayer | false | 16,838 | ["MIT"] | 235 | 10f2fac2e70801e7a3973c864b5a24b61d3f8bfe | https://github.com/zsquaredz/XSum/tree/10f2fac2e70801e7a3973c864b5a24b61d3f8bfe |
PSNR | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/nv/cnvhuia7crizj32ndnegecbcifyviwcvedgkhchfhsy556zzrhzu.py
# Topologically Sorted Source Nodes: [mse], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mse => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [-1]), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = 255.0
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = triton_helpers.minimum(tmp4, tmp1)
tmp6 = libdevice.nearbyint(tmp5)
tmp8 = tmp7 * tmp1
tmp9 = triton_helpers.maximum(tmp8, tmp3)
tmp10 = triton_helpers.minimum(tmp9, tmp1)
tmp11 = libdevice.nearbyint(tmp10)
tmp12 = tmp6 - tmp11
tmp13 = 0.00392156862745098
tmp14 = tmp12 * tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp19, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/47/c47uaghu4jphn4mfwcpdwxhwcbkl44jrwo3daxqnbvrcdihmek6i.py
# Topologically Sorted Source Nodes: [mse, log10, psnr, mean_1], Original ATen: [aten.mean, aten.log10, aten.mul]
# Source node to ATen node mapping:
# log10 => log10
# mean_1 => mean_1
# mse => mean
# psnr => mul_2
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [-1]), kwargs = {})
# %log10 : [num_users=1] = call_function[target=torch.ops.aten.log10.default](args = (%mean,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log10, -10), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_2,), kwargs = {})
triton_per_fused_log10_mean_mul_1 = async_compile.triton('triton_per_fused_log10_mean_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_log10_mean_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_log10_mean_mul_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = 64.0
tmp2 = tmp0 / tmp1
tmp3 = libdevice.log10(tmp2)
tmp4 = -10.0
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mse], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(arg0_1, arg1_1, buf0, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [mse, log10, psnr, mean_1], Original ATen: [aten.mean, aten.log10, aten.mul]
triton_per_fused_log10_mean_mul_1.run(buf2, buf0, 1, 4, grid=grid(1), stream=stream0)
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn.modules.loss import _Loss
class PSNR(_Loss):
def __init__(self):
super(PSNR, self).__init__()
self.val_range = 255
def _quantize(self, img):
img = img * self.val_range
img = img.clamp(0, self.val_range).round()
return img
def forward(self, x, y):
diff = self._quantize(x) - self._quantize(y)
        if x.dim() == 3:
            n = 1
        elif x.dim() == 4:
            n = x.size(0)
        elif x.dim() == 5:
            n = x.size(0) * x.size(1)
        else:
            raise ValueError('PSNR expects a 3D, 4D, or 5D input, got %dD' %
                x.dim())
mse = diff.div(self.val_range).pow(2).view(n, -1).mean(dim=-1)
psnr = -10 * mse.log10()
return psnr.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
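# A quick orientation: per sample, the metric quantizes both images to the
# 0-255 integer range, rescales the rounded difference back to [0, 1], and
# reports PSNR = -10 * log10(MSE) in dB, averaged over the batch. Below is a
# minimal CPU smoke test (a sketch using this record's own helpers).
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    metric = PSNR(*init_args, **init_kwargs)
    x, y = get_inputs()
    print(float(metric(x, y)))  # PSNR in dB, averaged over the 4 samples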
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
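# Kernel 0 (one program per batch sample): quantize both inputs to the 0-255
# range, round, take the difference, rescale by 1/255, and reduce each
# sample's 64 elements to a sum of squared errors.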
@triton.jit
def triton_per_fused_mean_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = 255.0
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = triton_helpers.minimum(tmp4, tmp1)
tmp6 = libdevice.nearbyint(tmp5)
tmp8 = tmp7 * tmp1
tmp9 = triton_helpers.maximum(tmp8, tmp3)
tmp10 = triton_helpers.minimum(tmp9, tmp1)
tmp11 = libdevice.nearbyint(tmp10)
tmp12 = tmp6 - tmp11
tmp13 = 0.00392156862745098
tmp14 = tmp12 * tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tl.store(out_ptr0 + x0, tmp19, xmask)
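# Kernel 1 (a single program): divide each per-sample sum by 64.0 to obtain
# the MSE, apply -10 * log10, and average the 4 per-sample values in place
# into the scalar output buffer.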
@triton.jit
def triton_per_fused_log10_mean_mul_1(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 64.0
tmp2 = tmp0 / tmp1
tmp3 = libdevice.log10(tmp2)
tmp4 = -10.0
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_mean_0[grid(4)](arg0_1, arg1_1, buf0, 4, 64,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
        triton_per_fused_log10_mean_mul_1[grid(1)](buf2, buf0, 1, 4,
            XBLOCK=1, num_warps=2, num_stages=1)
del buf0
return buf2,
class PSNRNew(_Loss):
def __init__(self):
super(PSNRNew, self).__init__()
self.val_range = 255
def _quantize(self, img):
img = img * self.val_range
img = img.clamp(0, self.val_range).round()
return img
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| zzh-tech/RSCD | PSNR | false | 16,839 | ["MIT"] | 57 | b287b1621121f8ca7ece6b27ebd4e28a5f8e6f5e | https://github.com/zzh-tech/RSCD/tree/b287b1621121f8ca7ece6b27ebd4e28a5f8e6f5e |
DenseLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/74/c74xlrcdmmc2trtur5rpkyhcpr6eqjx5iq3jf77lbu462fmxu3zc.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out_1 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %relu], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + (x3), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4s/c4sk3j6bynrnc25abteokiyffg626j5o2tqgjhdcibavx37q4342.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_2, buf0, buf1, 512, grid=grid(512), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf0, buf2, 256, grid=grid(256), stream=stream0)
del buf0
return (buf1, primals_1, primals_2, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def actFunc(act, *args, **kwargs):
act = act.lower()
if act == 'relu':
return nn.ReLU()
elif act == 'relu6':
return nn.ReLU6()
elif act == 'leakyrelu':
return nn.LeakyReLU(0.1)
elif act == 'prelu':
return nn.PReLU()
elif act == 'rrelu':
return nn.RReLU(0.1, 0.3)
elif act == 'selu':
return nn.SELU()
elif act == 'celu':
return nn.CELU()
elif act == 'elu':
return nn.ELU()
elif act == 'gelu':
return nn.GELU()
elif act == 'tanh':
return nn.Tanh()
else:
raise NotImplementedError
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
class DenseLayer(nn.Module):
"""
Dense layer for residual dense block
"""
def __init__(self, in_chs, growth_rate, activation='relu'):
super(DenseLayer, self).__init__()
self.conv = conv3x3(in_chs, growth_rate)
self.act = actFunc(activation)
def forward(self, x):
out = self.act(self.conv(x))
out = torch.cat((x, out), 1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_chs': 4, 'growth_rate': 4}]
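# Minimal CPU smoke test (a sketch using this record's own helpers): the
# concatenation grows the channel dimension from in_chs to
# in_chs + growth_rate, so the 4 input channels become 8 here.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    layer = DenseLayer(*init_args, **init_kwargs)
    out = layer(*get_inputs())
    print(out.shape)  # torch.Size([4, 8, 4, 4])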
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
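# Fuses the channel concatenation with the ReLU: output channels 0-3 copy the
# layer input straight through, channels 4-7 read the convolution result and
# clamp it at zero, producing the (4, 8, 4, 4) concatenated tensor in one pass.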
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x3, tmp14, xmask)
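# Backward helper: stores a boolean mask marking where the ReLU output is
# <= 0, which autograd later uses to zero the corresponding gradients.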
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_2, buf0, buf1, 512,
XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf0, buf2,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
return buf1, primals_1, primals_2, buf2
def actFunc(act, *args, **kwargs):
act = act.lower()
if act == 'relu':
return nn.ReLU()
elif act == 'relu6':
return nn.ReLU6()
elif act == 'leakyrelu':
return nn.LeakyReLU(0.1)
elif act == 'prelu':
return nn.PReLU()
elif act == 'rrelu':
return nn.RReLU(0.1, 0.3)
elif act == 'selu':
return nn.SELU()
elif act == 'celu':
return nn.CELU()
elif act == 'elu':
return nn.ELU()
elif act == 'gelu':
return nn.GELU()
elif act == 'tanh':
return nn.Tanh()
else:
raise NotImplementedError
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
class DenseLayerNew(nn.Module):
"""
Dense layer for residual dense block
"""
def __init__(self, in_chs, growth_rate, activation='relu'):
super(DenseLayerNew, self).__init__()
self.conv = conv3x3(in_chs, growth_rate)
self.act = actFunc(activation)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| zzh-tech/RSCD | DenseLayer | false | 16,840 | ["MIT"] | 57 | b287b1621121f8ca7ece6b27ebd4e28a5f8e6f5e | https://github.com/zzh-tech/RSCD/tree/b287b1621121f8ca7ece6b27ebd4e28a5f8e6f5e |
TxtNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/as/casyowhej6uclixf7ii4zor625rml3avsukhdm5lizfb77gxkeo3.py
# Topologically Sorted Source Nodes: [feat], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# feat => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/w6/cw6oxhjusphciyrwtdv7hsbyu66awgqilvgwaa6h4lpwx63s4prd.py
# Topologically Sorted Source Nodes: [mul, code], Original ATen: [aten.mul, aten.tanh]
# Source node to ATen node mapping:
# code => tanh
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 1.0), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul,), kwargs = {})
triton_poi_fused_mul_tanh_1 = async_compile.triton('triton_poi_fused_mul_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4096, 4), (4, 1))
assert_size_stride(primals_2, (4096, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4096), (4096, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4096), (4096, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4096), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4096), (65536, 16384, 4096, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [feat], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_2, 262144, grid=grid(262144), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hid], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4096), (4096, 1), 0), reinterpret_tensor(primals_4, (4096, 4), (1, 4096), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, code], Original ATen: [aten.mul, aten.tanh]
triton_poi_fused_mul_tanh_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
return (buf1, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4096, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4096), (4096, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class TxtNet(nn.Module):
def __init__(self, code_len, txt_feat_len):
super(TxtNet, self).__init__()
self.fc1 = nn.Linear(txt_feat_len, 4096)
self.fc2 = nn.Linear(4096, code_len)
self.alpha = 1.0
def forward(self, x):
feat = F.relu(self.fc1(x))
hid = self.fc2(feat)
code = F.tanh(self.alpha * hid)
return feat, hid, code
def set_alpha(self, epoch):
self.alpha = math.pow(1.0 * epoch + 1.0, 0.5)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'code_len': 4, 'txt_feat_len': 4}]
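# Minimal CPU smoke test (a sketch using this record's own helpers). Note
# that nn.Linear operates over the last dimension only, so a (4, 4, 4, 4)
# input yields (4, 4, 4, 4096) features, and the tanh keeps every code value
# strictly inside (-1, 1).
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    net = TxtNet(*init_args, **init_kwargs)
    feat, hid, code = net(*get_inputs())
    print(feat.shape, hid.shape, float(code.abs().max()))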
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
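# Adds the fc1 bias (broadcast across the 4096 output features) and applies
# the ReLU in place on the matmul result produced by extern_kernels.mm.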
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
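# Computes tanh(alpha * hid) element-wise; alpha is constant-folded to 1.0
# here, its value before set_alpha() is ever called.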
@triton.jit
def triton_poi_fused_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4096, 4), (4, 1))
assert_size_stride(primals_2, (4096,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4096), (4096, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4096), (4096, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4096), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4096), (65536, 16384,
4096, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(262144)](buf1, primals_2, 262144,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4096),
(4096, 1), 0), reinterpret_tensor(primals_4, (4096, 4), (1,
4096), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_tanh_1[grid(256)](buf2, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
    return (buf1, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        buf1, buf3, primals_4)
class TxtNetNew(nn.Module):
def __init__(self, code_len, txt_feat_len):
super(TxtNetNew, self).__init__()
self.fc1 = nn.Linear(txt_feat_len, 4096)
self.fc2 = nn.Linear(4096, code_len)
self.alpha = 1.0
def set_alpha(self, epoch):
self.alpha = math.pow(1.0 * epoch + 1.0, 0.5)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1], output[2]
| zzs1994/DJsRH | TxtNet | false | 16,841 | ["MIT"] | 53 | 6041c2df810723dd0052e2e5b7c6bd33033f0f21 | https://github.com/zzs1994/DJsRH/tree/6041c2df810723dd0052e2e5b7c6bd33033f0f21 |
FeatureFusion | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# q => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
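# Note: the layer norm is split across two kernels -- the reduction above
# turns each row of 4 elements into its mean and rsqrt(var + 1e-05), and the
# elementwise kernel below applies the normalization together with the affine
# weight and bias.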
# kernel path: runs/run_shard_0/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# q => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/xp/cxp3ouwpdhdlmipppq44wjaey2obmthzec7uqoddmpoigfmupxdx.py
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fp/cfpv2y3anijofjm2npw5ake2ybgpfcbtruhcdw6u6vhv6b3jy3o2.py
# Topologically Sorted Source Nodes: [attn_1, attn_2, attn_3], Original ATen: [aten.mul, aten.add, aten._softmax]
# Source node to ATen node mapping:
# attn_1 => mul_2
# attn_2 => add_2
# attn_3 => amax, exp, sub_1, sum_1
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 1.0), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_7), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_2, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_add_mul_3 = async_compile.triton('triton_poi_fused__softmax_add_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_mul_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp5 * tmp1
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp11 = tmp10 * tmp1
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 * tmp1
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp8 - tmp19
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp19
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp18 - tmp19
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tl.store(out_ptr0 + (x0), tmp19, xmask)
tl.store(out_ptr1 + (x0), tmp30, xmask)
''', device_str='cuda')
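# Note: the softmax is likewise split -- the kernel above produces the
# row-wise max and the sum of exponentials, and the kernel below uses both
# statistics to normalize each scaled, bias-shifted attention score.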
# kernel path: runs/run_shard_0/inductor_cache/c7/cc7u22xdds5tvjp2vc6ywemkw2nqkrlnta6v3yxi5lhebkxnhcnd.py
# Topologically Sorted Source Nodes: [attn_1, attn_2, attn_3], Original ATen: [aten.mul, aten.add, aten._softmax]
# Source node to ATen node mapping:
# attn_1 => mul_2
# attn_2 => add_2
# attn_3 => amax, div, exp, sub_1
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 1.0), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_7), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_2, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_add_mul_4 = async_compile.triton('triton_poi_fused__softmax_add_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_mul_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_mul_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (x2), xmask)
tmp5 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/zq/czqeiybdb6mlnwo4hmrayt3c44g7hbps2ftgdd7x2mv3sr2mwjbn.py
# Topologically Sorted Source Nodes: [x_2, z, q_4], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# q_4 => var_mean_2
# x_2 => add_3
# z => add_4
# Graph fragment:
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_9), kwargs = {})
# %add_4 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_3), kwargs = {})
# %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_4, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (1))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (2))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (3))
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
tl.store(out_ptr1 + (x0), tmp40, xmask)
''', device_str='cuda')
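# Reference sketch (illustrative, not generated by Inductor): kernel 5 emits
# only the per-row mean and biased variance (correction=0) of the residual sum
# over the last, size-4 dimension; the affine kernels below consume them.
def _add_layer_norm_stats_reference(base, x, bias):
    z = base + x + bias  # add_3 / add_4 in the source graph
    mean = z.mean(dim=-1, keepdim=True)
    var = z.var(dim=-1, unbiased=False, keepdim=True)
    return mean, var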
# kernel path: runs/run_shard_0/inductor_cache/b2/cb2uf5lvmxtxcwricr7fgkd55d3u5fy3be42hq6ihy2pyexsmo52.py
# Topologically Sorted Source Nodes: [x_6, x_8, k_2, q_6], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# k_2 => add_12, add_13, mul_8, mul_9, rsqrt_3, sub_5
# q_6 => add_18, mul_12
# x_6 => add_8
# x_8 => add_9
# Graph fragment:
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_35, %primals_18), kwargs = {})
# %add_9 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_12, %add_8), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {})
# %rsqrt_3 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_12,), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_9, %getitem_7), kwargs = {})
# %mul_8 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %rsqrt_3), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_8, %primals_21), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_9, %primals_22), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_8, %primals_29), kwargs = {})
# %add_18 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_12, %primals_30), kwargs = {})
triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr7 + (x0), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr8 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tmp17 = tmp11 * tmp16
tmp19 = tmp17 + tmp18
tl.store(out_ptr1 + (x2), tmp15, xmask)
tl.store(out_ptr2 + (x2), tmp19, xmask)
''', device_str='cuda')
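# Reference sketch (illustrative, not generated by Inductor): kernel 6 reuses
# the statistics from kernel 5 and emits two differently-affined copies of the
# same normalized residual, one for the key path (k_2) and one for the query
# path (q_6). Kernel 7 below is the single-affine variant; kernel 9 applies
# the same dual affine from a precomputed rstd.
def _dual_affine_layer_norm_reference(base, x, bias, mean, var,
                                      w1, b1, w2, b2, eps=1e-05):
    h = (base + x + bias - mean) * torch.rsqrt(var + eps)
    return h * w1 + b1, h * w2 + b2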
# kernel path: runs/run_shard_0/inductor_cache/rd/crdlrdvbru4nlobctm3cvw7qf3jvm3pl7iulggydowoqcby3wcbp.py
# Topologically Sorted Source Nodes: [x_2, z, q_4], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# q_4 => add_10, add_11, mul_6, mul_7, rsqrt_2, sub_4
# x_2 => add_3
# z => add_4
# Graph fragment:
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_9), kwargs = {})
# %add_4 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_3), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {})
# %rsqrt_2 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_10,), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %getitem_5), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %rsqrt_2), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %primals_19), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %primals_20), kwargs = {})
triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fe/cfekjdsthatxjbbhgpigh2n2waatgzwuthjkuqadgzag4jvzvepw.py
# Topologically Sorted Source Nodes: [x_2, z, x_11, z_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_11 => add_15
# x_2 => add_3
# z => add_4
# z_1 => add_16
# Graph fragment:
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_9), kwargs = {})
# %add_4 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_3), kwargs = {})
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_53, %primals_28), kwargs = {})
# %add_16 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %add_15), kwargs = {})
triton_poi_fused_add_8 = async_compile.triton('triton_poi_fused_add_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + (x2), xmask)
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
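# Reference sketch (illustrative, not generated by Inductor): kernel 8 folds
# two bias adds and two residual connections into one in-place update,
# computing (base + x + bias_a) + (y + bias_b), i.e. z_1 = z + x_11 in the
# source graph's terms.
def _double_residual_add_reference(base, x, bias_a, y, bias_b):
    return (base + x + bias_a) + (y + bias_b)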
# kernel path: runs/run_shard_0/inductor_cache/4s/c4s53vwtathps5cx2f3p4z27pqeuyf7rkwdfjgy2puuj6luhmiaq.py
# Topologically Sorted Source Nodes: [k_4, layer_norm_6], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# k_4 => add_19, add_20, mul_13, mul_14, rsqrt_5, sub_8, var_mean_5
# layer_norm_6 => add_25, mul_17
# Graph fragment:
# %var_mean_5 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_16, [2]), kwargs = {correction: 0, keepdim: True})
# %add_19 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_10, 1e-05), kwargs = {})
# %rsqrt_5 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_19,), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_16, %getitem_11), kwargs = {})
# %mul_13 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_8, %rsqrt_5), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_13, %primals_31), kwargs = {})
# %add_20 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_14, %primals_32), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_13, %primals_39), kwargs = {})
# %add_25 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_17, %primals_40), kwargs = {})
triton_poi_fused_native_layer_norm_9 = async_compile.triton('triton_poi_fused_native_layer_norm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp4 * tmp9
tmp12 = tmp10 + tmp11
tl.store(out_ptr0 + (x2), tmp8, xmask)
tl.store(out_ptr1 + (x2), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/b4/cb43jhxvcrefkhdp7ixdoh6nmvez5h55vhlzkxtasuovu5ru7pe5.py
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# x_19 => add_26, erf, mul_18, mul_19, mul_20
# Graph fragment:
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_73, 0.5), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_73, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_19,), kwargs = {})
# %add_26 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_18, %add_26), kwargs = {})
triton_poi_fused_gelu_10 = async_compile.triton('triton_poi_fused_gelu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
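# Reference sketch (illustrative, not generated by Inductor): the kernel above
# is the exact erf-based GELU, 0.5 * x * (1 + erf(x / sqrt(2))), matching the
# default nn.GELU used by Mlp in the source module further below.
def _gelu_reference(x):
    return 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))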
# kernel path: runs/run_shard_0/inductor_cache/io/ciotcs6lafnolr453xtypj4dcvqei4fjfmjd3o53vubfflstlhlu.py
# Topologically Sorted Source Nodes: [z_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# z_2 => add_27
# Graph fragment:
# %add_27 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_16, %view_75), kwargs = {})
triton_poi_fused_add_11 = async_compile.triton('triton_poi_fused_add_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
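# Reference sketch (illustrative, not generated by Inductor): kernel 11 is the
# plain MLP residual, out = skip + (x + bias), applied in place.
def _mlp_residual_add_reference(skip, x, bias):
    return skip + (x + bias)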
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_17, (4, 4), (4, 1))
assert_size_stride(primals_18, (4, ), (1, ))
assert_size_stride(primals_19, (4, ), (1, ))
assert_size_stride(primals_20, (4, ), (1, ))
assert_size_stride(primals_21, (4, ), (1, ))
assert_size_stride(primals_22, (4, ), (1, ))
assert_size_stride(primals_23, (4, 4), (4, 1))
assert_size_stride(primals_24, (4, 4), (4, 1))
assert_size_stride(primals_25, (4, 4), (4, 1))
assert_size_stride(primals_26, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_27, (4, 4), (4, 1))
assert_size_stride(primals_28, (4, ), (1, ))
assert_size_stride(primals_29, (4, ), (1, ))
assert_size_stride(primals_30, (4, ), (1, ))
assert_size_stride(primals_31, (4, ), (1, ))
assert_size_stride(primals_32, (4, ), (1, ))
assert_size_stride(primals_33, (4, 4), (4, 1))
assert_size_stride(primals_34, (4, 4), (4, 1))
assert_size_stride(primals_35, (4, 4), (4, 1))
assert_size_stride(primals_36, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_37, (4, 4), (4, 1))
assert_size_stride(primals_38, (4, ), (1, ))
assert_size_stride(primals_39, (4, ), (1, ))
assert_size_stride(primals_40, (4, ), (1, ))
assert_size_stride(primals_41, (16, 4), (4, 1))
assert_size_stride(primals_42, (16, ), (1, ))
assert_size_stride(primals_43, (4, 16), (16, 1))
assert_size_stride(primals_44, (4, ), (1, ))
assert_size_stride(primals_45, (4, ), (1, ))
assert_size_stride(primals_46, (4, ), (1, ))
assert_size_stride(primals_47, (16, 4), (4, 1))
assert_size_stride(primals_48, (16, ), (1, ))
assert_size_stride(primals_49, (4, 16), (16, 1))
assert_size_stride(primals_50, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, grid=grid(64), stream=stream0)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf3, buf6, 16, 4, grid=grid(16, 4), stream=stream0)
buf7 = reinterpret_tensor(buf3, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf4, buf7, 16, 4, grid=grid(16, 4), stream=stream0)
buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf7, (16, 1, 4), (4, 0, 1), 0), out=buf8)
buf9 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf4 # reuse
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [attn_1, attn_2, attn_3], Original ATen: [aten.mul, aten.add, aten._softmax]
triton_poi_fused__softmax_add_mul_3.run(buf8, primals_7, buf9, buf10, 64, grid=grid(64), stream=stream0)
buf11 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [attn_1, attn_2, attn_3], Original ATen: [aten.mul, aten.add, aten._softmax]
triton_poi_fused__softmax_add_mul_4.run(buf11, primals_7, buf9, buf10, 256, grid=grid(256), stream=stream0)
del primals_7
buf12 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf5, buf12, 16, 4, grid=grid(16, 4), stream=stream0)
buf13 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf12, (16, 4, 1), (4, 1, 0), 0), out=buf13)
buf14 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf13, buf14, 16, 4, grid=grid(16, 4), stream=stream0)
buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0); del buf13 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf15)
buf16 = buf1; del buf1 # reuse
buf17 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [q_2], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_0.run(primals_12, buf16, buf17, 16, grid=grid(16), stream=stream0)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_2], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_12, buf16, buf17, primals_10, primals_11, buf18, 64, grid=grid(64), stream=stream0)
del primals_10
del primals_11
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf19)
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_5], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf20)
buf21 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_6], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf21)
buf22 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_5], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf19, buf22, 16, 4, grid=grid(16, 4), stream=stream0)
buf23 = reinterpret_tensor(buf19, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf19 # reuse
# Topologically Sorted Source Nodes: [attn_5], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf20, buf23, 16, 4, grid=grid(16, 4), stream=stream0)
buf24 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_5], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf22, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf23, (16, 1, 4), (4, 0, 1), 0), out=buf24)
buf25 = reinterpret_tensor(buf20, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf20 # reuse
buf26 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [attn_6, attn_7, attn_8], Original ATen: [aten.mul, aten.add, aten._softmax]
triton_poi_fused__softmax_add_mul_3.run(buf24, primals_16, buf25, buf26, 64, grid=grid(64), stream=stream0)
buf27 = reinterpret_tensor(buf24, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf24 # reuse
# Topologically Sorted Source Nodes: [attn_6, attn_7, attn_8], Original ATen: [aten.mul, aten.add, aten._softmax]
triton_poi_fused__softmax_add_mul_4.run(buf27, primals_16, buf25, buf26, 256, grid=grid(256), stream=stream0)
del primals_16
buf28 = reinterpret_tensor(buf26, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf26 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf21, buf28, 16, 4, grid=grid(16, 4), stream=stream0)
buf29 = reinterpret_tensor(buf21, (16, 4, 1), (4, 1, 1), 0); del buf21 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf27, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf28, (16, 4, 1), (4, 1, 0), 0), out=buf29)
buf30 = reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0); del buf25 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf29, buf30, 16, 4, grid=grid(16, 4), stream=stream0)
buf31 = reinterpret_tensor(buf29, (16, 4), (4, 1), 0); del buf29 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf30, (16, 4), (4, 1), 0), reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf31)
buf32 = buf17; del buf17 # reuse
buf33 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [x_2, z, q_4], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(primals_3, buf15, primals_9, buf32, buf33, 16, grid=grid(16), stream=stream0)
buf34 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf35 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [x_6, x_8, k_2], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(primals_12, buf31, primals_18, buf34, buf35, 16, grid=grid(16), stream=stream0)
buf39 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf55 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6, x_8, k_2, q_6], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_6.run(primals_12, buf31, primals_18, buf34, buf35, primals_21, primals_22, primals_29, primals_30, buf39, buf55, 64, grid=grid(64), stream=stream0)
del buf34
del buf35
del primals_22
del primals_30
buf37 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2, z, q_4], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_7.run(primals_3, buf15, primals_9, buf32, buf33, primals_19, primals_20, buf37, 64, grid=grid(64), stream=stream0)
del primals_20
buf38 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_8], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf37, (16, 4), (4, 1), 0), reinterpret_tensor(primals_23, (4, 4), (1, 4), 0), out=buf38)
buf40 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_9], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf39, (16, 4), (4, 1), 0), reinterpret_tensor(primals_24, (4, 4), (1, 4), 0), out=buf40)
buf41 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_10], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf39, (16, 4), (4, 1), 0), reinterpret_tensor(primals_25, (4, 4), (1, 4), 0), out=buf41)
buf42 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_10], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf38, buf42, 16, 4, grid=grid(16, 4), stream=stream0)
buf43 = reinterpret_tensor(buf38, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf38 # reuse
# Topologically Sorted Source Nodes: [attn_10], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf40, buf43, 16, 4, grid=grid(16, 4), stream=stream0)
buf44 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_10], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf42, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf43, (16, 1, 4), (4, 0, 1), 0), out=buf44)
buf45 = reinterpret_tensor(buf40, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf40 # reuse
buf46 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [attn_11, attn_12, attn_13], Original ATen: [aten.mul, aten.add, aten._softmax]
triton_poi_fused__softmax_add_mul_3.run(buf44, primals_26, buf45, buf46, 64, grid=grid(64), stream=stream0)
buf47 = reinterpret_tensor(buf44, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf44 # reuse
# Topologically Sorted Source Nodes: [attn_11, attn_12, attn_13], Original ATen: [aten.mul, aten.add, aten._softmax]
triton_poi_fused__softmax_add_mul_4.run(buf47, primals_26, buf45, buf46, 256, grid=grid(256), stream=stream0)
del primals_26
buf48 = reinterpret_tensor(buf46, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf46 # reuse
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf41, buf48, 16, 4, grid=grid(16, 4), stream=stream0)
buf49 = reinterpret_tensor(buf41, (16, 4, 1), (4, 1, 1), 0); del buf41 # reuse
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf47, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf48, (16, 4, 1), (4, 1, 0), 0), out=buf49)
buf50 = reinterpret_tensor(buf45, (4, 4, 4), (16, 4, 1), 0); del buf45 # reuse
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf49, buf50, 16, 4, grid=grid(16, 4), stream=stream0)
buf51 = reinterpret_tensor(buf49, (16, 4), (4, 1), 0); del buf49 # reuse
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf50, (16, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), out=buf51)
buf52 = reinterpret_tensor(buf51, (4, 4, 4), (16, 4, 1), 0); del buf51 # reuse
# Topologically Sorted Source Nodes: [x_2, z, x_11, z_1], Original ATen: [aten.add]
triton_poi_fused_add_8.run(buf52, primals_3, buf15, primals_9, primals_28, 64, grid=grid(64), stream=stream0)
del primals_28
buf53 = buf33; del buf33 # reuse
buf54 = buf32; del buf32 # reuse
# Topologically Sorted Source Nodes: [k_4], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_0.run(buf52, buf53, buf54, 16, grid=grid(16), stream=stream0)
buf56 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_12], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf55, (16, 4), (4, 1), 0), reinterpret_tensor(primals_33, (4, 4), (1, 4), 0), out=buf56)
buf57 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf71 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k_4, layer_norm_6], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_9.run(buf52, buf53, buf54, primals_31, primals_32, primals_39, primals_40, buf57, buf71, 64, grid=grid(64), stream=stream0)
del primals_32
del primals_40
buf58 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_13], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf57, (16, 4), (4, 1), 0), reinterpret_tensor(primals_34, (4, 4), (1, 4), 0), out=buf58)
buf59 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_14], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf57, (16, 4), (4, 1), 0), reinterpret_tensor(primals_35, (4, 4), (1, 4), 0), out=buf59)
buf60 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_15], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf56, buf60, 16, 4, grid=grid(16, 4), stream=stream0)
buf61 = reinterpret_tensor(buf56, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf56 # reuse
# Topologically Sorted Source Nodes: [attn_15], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf58, buf61, 16, 4, grid=grid(16, 4), stream=stream0)
buf62 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_15], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf60, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf61, (16, 1, 4), (4, 0, 1), 0), out=buf62)
buf63 = reinterpret_tensor(buf58, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf58 # reuse
buf64 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [attn_16, attn_17, attn_18], Original ATen: [aten.mul, aten.add, aten._softmax]
triton_poi_fused__softmax_add_mul_3.run(buf62, primals_36, buf63, buf64, 64, grid=grid(64), stream=stream0)
buf65 = reinterpret_tensor(buf62, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf62 # reuse
# Topologically Sorted Source Nodes: [attn_16, attn_17, attn_18], Original ATen: [aten.mul, aten.add, aten._softmax]
triton_poi_fused__softmax_add_mul_4.run(buf65, primals_36, buf63, buf64, 256, grid=grid(256), stream=stream0)
del primals_36
buf66 = reinterpret_tensor(buf64, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf64 # reuse
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf59, buf66, 16, 4, grid=grid(16, 4), stream=stream0)
buf67 = reinterpret_tensor(buf59, (16, 4, 1), (4, 1, 1), 0); del buf59 # reuse
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf65, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf66, (16, 4, 1), (4, 1, 0), 0), out=buf67)
buf68 = reinterpret_tensor(buf63, (4, 4, 4), (16, 4, 1), 0); del buf63 # reuse
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf67, buf68, 16, 4, grid=grid(16, 4), stream=stream0)
buf69 = reinterpret_tensor(buf67, (16, 4), (4, 1), 0); del buf67 # reuse
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf68, (16, 4), (4, 1), 0), reinterpret_tensor(primals_37, (4, 4), (1, 4), 0), out=buf69)
buf70 = reinterpret_tensor(buf69, (4, 4, 4), (16, 4, 1), 0); del buf69 # reuse
# Topologically Sorted Source Nodes: [x_6, x_8, x_15, x_17], Original ATen: [aten.add]
triton_poi_fused_add_8.run(buf70, primals_12, buf31, primals_18, primals_38, 64, grid=grid(64), stream=stream0)
del primals_38
buf72 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_18], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_42, reinterpret_tensor(buf71, (16, 4), (4, 1), 0), reinterpret_tensor(primals_41, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf72)
del primals_42
buf73 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.gelu]
triton_poi_fused_gelu_10.run(buf72, buf73, 256, grid=grid(256), stream=stream0)
buf74 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf73, (16, 16), (16, 1), 0), reinterpret_tensor(primals_43, (16, 4), (1, 16), 0), out=buf74)
buf75 = reinterpret_tensor(buf74, (4, 4, 4), (16, 4, 1), 0); del buf74 # reuse
# Topologically Sorted Source Nodes: [z_2], Original ATen: [aten.add]
triton_poi_fused_add_11.run(buf75, buf52, primals_44, 64, grid=grid(64), stream=stream0)
del primals_44
buf76 = buf54; del buf54 # reuse
buf77 = buf53; del buf53 # reuse
# Topologically Sorted Source Nodes: [layer_norm_7], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_0.run(buf70, buf76, buf77, 16, grid=grid(16), stream=stream0)
buf78 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm_7], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(buf70, buf76, buf77, primals_45, primals_46, buf78, 64, grid=grid(64), stream=stream0)
del buf76
del buf77
del primals_46
buf79 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_23], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_48, reinterpret_tensor(buf78, (16, 4), (4, 1), 0), reinterpret_tensor(primals_47, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf79)
del primals_48
buf80 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_24], Original ATen: [aten.gelu]
triton_poi_fused_gelu_10.run(buf79, buf80, 256, grid=grid(256), stream=stream0)
buf81 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf80, (16, 16), (16, 1), 0), reinterpret_tensor(primals_49, (16, 4), (1, 16), 0), out=buf81)
buf82 = reinterpret_tensor(buf81, (4, 4, 4), (16, 4, 1), 0); del buf81 # reuse
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.add]
triton_poi_fused_add_11.run(buf82, buf70, primals_50, 64, grid=grid(64), stream=stream0)
del primals_50
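    # Editorial note: buf75 and buf82 below are the (z, x) outputs of
    # FeatureFusion.forward; the remaining tensors in the tuple appear to be
    # activations and parameters saved for the backward pass, as is typical
    # of an Inductor forward graph.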
return (buf75, buf82, primals_3, primals_9, primals_12, primals_18, primals_19, primals_21, primals_29, primals_31, primals_39, primals_45, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf11, reinterpret_tensor(buf14, (16, 4), (4, 1), 0), buf15, reinterpret_tensor(buf18, (16, 4), (4, 1), 0), buf27, reinterpret_tensor(buf30, (16, 4), (4, 1), 0), buf31, reinterpret_tensor(buf37, (16, 4), (4, 1), 0), reinterpret_tensor(buf39, (16, 4), (4, 1), 0), buf47, reinterpret_tensor(buf50, (16, 4), (4, 1), 0), buf52, reinterpret_tensor(buf55, (16, 4), (4, 1), 0), reinterpret_tensor(buf57, (16, 4), (4, 1), 0), buf65, reinterpret_tensor(buf68, (16, 4), (4, 1), 0), buf70, reinterpret_tensor(buf71, (16, 4), (4, 1), 0), buf72, reinterpret_tensor(buf73, (16, 16), (16, 1), 0), reinterpret_tensor(buf78, (16, 4), (4, 1), 0), buf79, reinterpret_tensor(buf80, (16, 16), (16, 1), 0), primals_49, primals_47, primals_43, primals_41, primals_37, reinterpret_tensor(buf66, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf60, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf61, (16, 4, 1), (4, 1, 4), 0), primals_35, primals_34, primals_33, primals_27, reinterpret_tensor(buf48, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf42, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf43, (16, 4, 1), (4, 1, 4), 0), primals_25, primals_24, primals_23, primals_17, reinterpret_tensor(buf28, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf22, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf23, (16, 4, 1), (4, 1, 4), 0), primals_15, primals_14, primals_13, primals_8, reinterpret_tensor(buf12, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 4), 0), primals_6, primals_5, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.distributed
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
class Mlp(nn.Module):
""" Multilayer perceptron."""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
"""
Args:
x (torch.Tensor): (B, L, C), input tensor
Returns:
torch.Tensor: (B, L, C), output tensor
"""
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
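# Illustrative usage (editorial addition, not part of the original module);
# shapes follow the (B, L, C) convention documented in forward().
def _mlp_example():
    mlp = Mlp(in_features=4, hidden_features=16)
    return mlp(torch.rand(2, 5, 4))  # -> (2, 5, 4)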
class CrossAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
super(CrossAttention, self).__init__()
        assert dim % num_heads == 0, f'dim {dim} must be divisible by num_heads {num_heads}.'
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
if attn_pos_encoding_only:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.kv = nn.Linear(dim, 2 * dim, bias=qkv_bias)
else:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_pos_encoding_only = attn_pos_encoding_only
def forward(self, q, kv, q_ape, k_ape, attn_pos):
"""
Args:
q (torch.Tensor): (B, L_q, C)
kv (torch.Tensor): (B, L_kv, C)
q_ape (torch.Tensor | None): (1 or B, L_q, C), absolute positional encoding for q
k_ape (torch.Tensor | None): (1 or B, L_kv, C), absolute positional encoding for k
attn_pos (torch.Tensor | None): (1 or B, num_heads, L_q, L_kv), untied positional encoding
Returns:
torch.Tensor: (B, L_q, C)
"""
B, q_N, C = q.shape
kv_N = kv.shape[1]
if self.attn_pos_encoding_only:
assert q_ape is None and k_ape is None
q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
            kv = self.kv(kv).reshape(B, kv_N, 2, self.num_heads,
                C // self.num_heads).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
else:
q = q + q_ape if q_ape is not None else q
q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
k = kv + k_ape if k_ape is not None else kv
k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
v = self.v(kv).reshape(B, -1, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
attn = q @ k.transpose(-2, -1)
attn = attn * self.scale
if attn_pos is not None:
attn = attn + attn_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, q_N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
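# Illustrative usage (editorial addition): with dim=4 and num_heads=4 the head
# dimension is 1, so the scale baked into the compiled trace above is 1.0.
def _cross_attention_example():
    attn = CrossAttention(dim=4, num_heads=4)
    q, kv = torch.rand(2, 5, 4), torch.rand(2, 7, 4)
    attn_pos = torch.rand(2, 4, 5, 7)  # (B, num_heads, L_q, L_kv)
    return attn(q, kv, None, None, attn_pos)  # -> (2, 5, 4)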
class SelfAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
super(SelfAttention, self).__init__()
        assert dim % num_heads == 0, f'dim {dim} must be divisible by num_heads {num_heads}.'
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
if attn_pos_encoding_only:
self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias)
else:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_pos_encoding_only = attn_pos_encoding_only
def forward(self, x, q_ape, k_ape, attn_pos):
"""
Args:
x (torch.Tensor): (B, L, C)
q_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for q
k_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for k
attn_pos (torch.Tensor | None): (1 or B, num_heads, L, L), untied positional encoding
Returns:
torch.Tensor: (B, L, C)
"""
B, N, C = x.shape
if self.attn_pos_encoding_only:
assert q_ape is None and k_ape is None
            qkv = self.qkv(x).reshape(B, N, 3, self.num_heads,
                C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
else:
q = x + q_ape if q_ape is not None else x
q = self.q(q).reshape(B, N, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
k = x + k_ape if k_ape is not None else x
k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
v = self.v(x).reshape(B, -1, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
attn = q @ k.transpose(-2, -1)
attn = attn * self.scale
if attn_pos is not None:
attn = attn + attn_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class FeatureFusion(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=nn.Identity(),
        act_layer=nn.GELU, norm_layer=nn.LayerNorm,
        attn_pos_encoding_only=False):
super(FeatureFusion, self).__init__()
self.z_norm1 = norm_layer(dim)
self.x_norm1 = norm_layer(dim)
self.z_self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale,
attn_drop, drop, attn_pos_encoding_only)
self.x_self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale,
attn_drop, drop, attn_pos_encoding_only)
self.z_norm2_1 = norm_layer(dim)
self.z_norm2_2 = norm_layer(dim)
self.x_norm2_1 = norm_layer(dim)
self.x_norm2_2 = norm_layer(dim)
self.z_x_cross_attention = CrossAttention(dim, num_heads, qkv_bias,
qk_scale, attn_drop, drop, attn_pos_encoding_only)
self.x_z_cross_attention = CrossAttention(dim, num_heads, qkv_bias,
qk_scale, attn_drop, drop, attn_pos_encoding_only)
mlp_hidden_dim = int(dim * mlp_ratio)
self.z_norm3 = norm_layer(dim)
self.x_norm3 = norm_layer(dim)
self.z_mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
self.x_mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
self.drop_path = drop_path
def forward(self, z, x, z_self_attn_pos, x_self_attn_pos,
z_x_cross_attn_pos, x_z_cross_attn_pos):
z = z + self.drop_path(self.z_self_attn(self.z_norm1(z), None, None,
z_self_attn_pos))
x = x + self.drop_path(self.x_self_attn(self.x_norm1(x), None, None,
x_self_attn_pos))
z = z + self.drop_path(self.z_x_cross_attention(self.z_norm2_1(z),
self.x_norm2_1(x), None, None, z_x_cross_attn_pos))
x = x + self.drop_path(self.x_z_cross_attention(self.x_norm2_2(x),
self.z_norm2_2(z), None, None, x_z_cross_attn_pos))
z = z + self.drop_path(self.z_mlp(self.z_norm3(z)))
x = x + self.drop_path(self.x_mlp(self.x_norm3(x)))
return z, x
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'num_heads': 4}]
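# Hedged usage sketch tying the helpers above together; the wrapper name
# _run_feature_fusion_example is illustrative and not part of the module.
def _run_feature_fusion_example():
    init_args, init_kwargs = get_init_inputs()
    fusion = FeatureFusion(*init_args, **init_kwargs)
    z, x = fusion(*get_inputs())
    return z.shape, x.shape  # both torch.Size([4, 4, 4])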
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
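# The kernel above fuses the LayerNorm reductions for a (16, 4) row view of the
# input: per row it stores mean = sum / 4 and rstd = rsqrt(var + 1e-05), the
# same pair that torch.var_mean(x.view(16, 4), dim=-1, correction=0) followed
# by torch.rsqrt would produce (hedged eager reference; the Triton version
# unrolls the 4-element row by hand).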
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
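# Second LayerNorm stage: reloads the cached per-row mean and rstd and writes
# (x - mean) * rstd * weight + bias for all 64 elements in a single pass.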
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
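# Transpose-copy kernel: reads with the strided pattern (y0 + 4 * x2 + 16 * y1)
# and writes contiguously, materializing the (0, 2, 1, 3) head permutation so
# the extern bmm/mm calls below can operate on plainly strided buffers.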
@triton.jit
def triton_poi_fused__softmax_add_mul_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp5 * tmp1
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.maximum(tmp4, tmp8)
tmp11 = tmp10 * tmp1
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 * tmp1
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp20 = tmp4 - tmp19
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp8 - tmp19
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp19
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp18 - tmp19
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tl.store(out_ptr0 + x0, tmp19, xmask)
tl.store(out_ptr1 + x0, tmp30, xmask)
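# First softmax pass over (score * scale + attn_pos): emits the row maximum and
# the row sum of exponentials, the two reductions of a numerically stable
# softmax. The scale shows up as the literal 1.0 because head_dim = dim //
# num_heads = 1 in this configuration, and 1 ** -0.5 == 1.0.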
@triton.jit
def triton_poi_fused__softmax_add_mul_4(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + x2, xmask)
tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + x2, tmp9, xmask)
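# Second softmax pass: normalizes in place, computing
# exp((score * scale + attn_pos) - row_max) / row_sum for all 256 scores.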
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + 2)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr2 + 3)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr1,
out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr7 + x0, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr8 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tmp17 = tmp11 * tmp16
tmp19 = tmp17 + tmp18
tl.store(out_ptr1 + x2, tmp15, xmask)
tl.store(out_ptr2 + x2, tmp19, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp4 * tmp9
tmp12 = tmp10 + tmp11
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
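# Exact (erf-based) GELU: 0.5 * x * (1 + erf(x / sqrt(2))); the constant
# 0.7071067811865476 is 1 / sqrt(2). This matches torch.nn.functional.gelu(x)
# with the default approximate='none'.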
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_17, (4, 4), (4, 1))
assert_size_stride(primals_18, (4,), (1,))
assert_size_stride(primals_19, (4,), (1,))
assert_size_stride(primals_20, (4,), (1,))
assert_size_stride(primals_21, (4,), (1,))
assert_size_stride(primals_22, (4,), (1,))
assert_size_stride(primals_23, (4, 4), (4, 1))
assert_size_stride(primals_24, (4, 4), (4, 1))
assert_size_stride(primals_25, (4, 4), (4, 1))
assert_size_stride(primals_26, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_27, (4, 4), (4, 1))
assert_size_stride(primals_28, (4,), (1,))
assert_size_stride(primals_29, (4,), (1,))
assert_size_stride(primals_30, (4,), (1,))
assert_size_stride(primals_31, (4,), (1,))
assert_size_stride(primals_32, (4,), (1,))
assert_size_stride(primals_33, (4, 4), (4, 1))
assert_size_stride(primals_34, (4, 4), (4, 1))
assert_size_stride(primals_35, (4, 4), (4, 1))
assert_size_stride(primals_36, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_37, (4, 4), (4, 1))
assert_size_stride(primals_38, (4,), (1,))
assert_size_stride(primals_39, (4,), (1,))
assert_size_stride(primals_40, (4,), (1,))
assert_size_stride(primals_41, (16, 4), (4, 1))
assert_size_stride(primals_42, (16,), (1,))
assert_size_stride(primals_43, (4, 16), (16, 1))
assert_size_stride(primals_44, (4,), (1,))
assert_size_stride(primals_45, (4,), (1,))
assert_size_stride(primals_46, (4,), (1,))
assert_size_stride(primals_47, (16, 4), (4, 1))
assert_size_stride(primals_48, (16,), (1,))
assert_size_stride(primals_49, (4, 16), (16, 1))
assert_size_stride(primals_50, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf3, buf6, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf3, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf3
triton_poi_fused_clone_2[grid(16, 4)](buf4, buf7, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf7, (16, 1, 4), (4, 0, 1), 0), out=buf8)
buf9 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf4
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_mul_3[grid(64)](buf8, primals_7, buf9,
buf10, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf8
triton_poi_fused__softmax_add_mul_4[grid(256)](buf11, primals_7,
buf9, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf12 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf9
triton_poi_fused_clone_2[grid(16, 4)](buf5, buf12, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0)
del buf5
extern_kernels.bmm(reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf12, (16, 4, 1), (4, 1, 0), 0), out=buf13)
buf14 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0)
del buf10
triton_poi_fused_clone_2[grid(16, 4)](buf13, buf14, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0)
del buf13
extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf15)
buf16 = buf1
del buf1
buf17 = buf0
del buf0
triton_poi_fused_native_layer_norm_0[grid(16)](primals_12, buf16,
buf17, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_12, buf16,
buf17, primals_10, primals_11, buf18, 64, XBLOCK=64, num_warps=
1, num_stages=1)
del primals_10
del primals_11
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf19)
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf20)
buf21 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf21)
buf22 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf19, buf22, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf23 = reinterpret_tensor(buf19, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf19
triton_poi_fused_clone_2[grid(16, 4)](buf20, buf23, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf24 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf22, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf23, (16, 1, 4), (4, 0, 1), 0), out=buf24)
buf25 = reinterpret_tensor(buf20, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf20
buf26 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_mul_3[grid(64)](buf24, primals_16,
buf25, buf26, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf27 = reinterpret_tensor(buf24, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf24
triton_poi_fused__softmax_add_mul_4[grid(256)](buf27, primals_16,
buf25, buf26, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_16
buf28 = reinterpret_tensor(buf26, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf26
triton_poi_fused_clone_2[grid(16, 4)](buf21, buf28, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf29 = reinterpret_tensor(buf21, (16, 4, 1), (4, 1, 1), 0)
del buf21
extern_kernels.bmm(reinterpret_tensor(buf27, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf28, (16, 4, 1), (4, 1, 0), 0), out=buf29)
buf30 = reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0)
del buf25
triton_poi_fused_clone_2[grid(16, 4)](buf29, buf30, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf31 = reinterpret_tensor(buf29, (16, 4), (4, 1), 0)
del buf29
extern_kernels.mm(reinterpret_tensor(buf30, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf31)
buf32 = buf17
del buf17
buf33 = buf16
del buf16
triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_3, buf15,
primals_9, buf32, buf33, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf34 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf35 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_12,
buf31, primals_18, buf34, buf35, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf39 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf55 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_12,
buf31, primals_18, buf34, buf35, primals_21, primals_22,
primals_29, primals_30, buf39, buf55, 64, XBLOCK=64, num_warps=
1, num_stages=1)
del buf34
del buf35
del primals_22
del primals_30
buf37 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_7[grid(64)](primals_3, buf15,
primals_9, buf32, buf33, primals_19, primals_20, buf37, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_20
buf38 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf37, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_23, (4, 4), (1, 4), 0), out=buf38)
buf40 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf39, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_24, (4, 4), (1, 4), 0), out=buf40)
buf41 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf39, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_25, (4, 4), (1, 4), 0), out=buf41)
buf42 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf38, buf42, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf43 = reinterpret_tensor(buf38, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf38
triton_poi_fused_clone_2[grid(16, 4)](buf40, buf43, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf44 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf42, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf43, (16, 1, 4), (4, 0, 1), 0), out=buf44)
buf45 = reinterpret_tensor(buf40, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf40
buf46 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_mul_3[grid(64)](buf44, primals_26,
buf45, buf46, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf47 = reinterpret_tensor(buf44, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf44
triton_poi_fused__softmax_add_mul_4[grid(256)](buf47, primals_26,
buf45, buf46, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_26
buf48 = reinterpret_tensor(buf46, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf46
triton_poi_fused_clone_2[grid(16, 4)](buf41, buf48, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf49 = reinterpret_tensor(buf41, (16, 4, 1), (4, 1, 1), 0)
del buf41
extern_kernels.bmm(reinterpret_tensor(buf47, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf48, (16, 4, 1), (4, 1, 0), 0), out=buf49)
buf50 = reinterpret_tensor(buf45, (4, 4, 4), (16, 4, 1), 0)
del buf45
triton_poi_fused_clone_2[grid(16, 4)](buf49, buf50, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf51 = reinterpret_tensor(buf49, (16, 4), (4, 1), 0)
del buf49
extern_kernels.mm(reinterpret_tensor(buf50, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), out=buf51)
buf52 = reinterpret_tensor(buf51, (4, 4, 4), (16, 4, 1), 0)
del buf51
triton_poi_fused_add_8[grid(64)](buf52, primals_3, buf15, primals_9,
primals_28, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_28
buf53 = buf33
del buf33
buf54 = buf32
del buf32
triton_poi_fused_native_layer_norm_0[grid(16)](buf52, buf53, buf54,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf56 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf55, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_33, (4, 4), (1, 4), 0), out=buf56)
buf57 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf71 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_9[grid(64)](buf52, buf53, buf54,
primals_31, primals_32, primals_39, primals_40, buf57, buf71,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_32
del primals_40
buf58 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf57, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_34, (4, 4), (1, 4), 0), out=buf58)
buf59 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf57, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_35, (4, 4), (1, 4), 0), out=buf59)
buf60 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf56, buf60, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf61 = reinterpret_tensor(buf56, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf56
triton_poi_fused_clone_2[grid(16, 4)](buf58, buf61, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf62 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf60, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf61, (16, 1, 4), (4, 0, 1), 0), out=buf62)
buf63 = reinterpret_tensor(buf58, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf58
buf64 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_mul_3[grid(64)](buf62, primals_36,
buf63, buf64, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf65 = reinterpret_tensor(buf62, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf62
triton_poi_fused__softmax_add_mul_4[grid(256)](buf65, primals_36,
buf63, buf64, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_36
buf66 = reinterpret_tensor(buf64, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf64
triton_poi_fused_clone_2[grid(16, 4)](buf59, buf66, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf67 = reinterpret_tensor(buf59, (16, 4, 1), (4, 1, 1), 0)
del buf59
extern_kernels.bmm(reinterpret_tensor(buf65, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf66, (16, 4, 1), (4, 1, 0), 0), out=buf67)
buf68 = reinterpret_tensor(buf63, (4, 4, 4), (16, 4, 1), 0)
del buf63
triton_poi_fused_clone_2[grid(16, 4)](buf67, buf68, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf69 = reinterpret_tensor(buf67, (16, 4), (4, 1), 0)
del buf67
extern_kernels.mm(reinterpret_tensor(buf68, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_37, (4, 4), (1, 4), 0), out=buf69)
buf70 = reinterpret_tensor(buf69, (4, 4, 4), (16, 4, 1), 0)
del buf69
triton_poi_fused_add_8[grid(64)](buf70, primals_12, buf31,
primals_18, primals_38, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_38
buf72 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_42, reinterpret_tensor(buf71, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_41, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf72)
del primals_42
buf73 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_gelu_10[grid(256)](buf72, buf73, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf74 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf73, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_43, (16, 4), (1, 16), 0), out=buf74)
buf75 = reinterpret_tensor(buf74, (4, 4, 4), (16, 4, 1), 0)
del buf74
triton_poi_fused_add_11[grid(64)](buf75, buf52, primals_44, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_44
buf76 = buf54
del buf54
buf77 = buf53
del buf53
triton_poi_fused_native_layer_norm_0[grid(16)](buf70, buf76, buf77,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf78 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](buf70, buf76, buf77,
primals_45, primals_46, buf78, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf76
del buf77
del primals_46
buf79 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_48, reinterpret_tensor(buf78, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_47, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf79)
del primals_48
buf80 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_gelu_10[grid(256)](buf79, buf80, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf81 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf80, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_49, (16, 4), (1, 16), 0), out=buf81)
buf82 = reinterpret_tensor(buf81, (4, 4, 4), (16, 4, 1), 0)
del buf81
triton_poi_fused_add_11[grid(64)](buf82, buf70, primals_50, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_50
return (buf75, buf82, primals_3, primals_9, primals_12, primals_18,
primals_19, primals_21, primals_29, primals_31, primals_39,
primals_45, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf11,
reinterpret_tensor(buf14, (16, 4), (4, 1), 0), buf15,
reinterpret_tensor(buf18, (16, 4), (4, 1), 0), buf27,
reinterpret_tensor(buf30, (16, 4), (4, 1), 0), buf31,
reinterpret_tensor(buf37, (16, 4), (4, 1), 0), reinterpret_tensor(
buf39, (16, 4), (4, 1), 0), buf47, reinterpret_tensor(buf50, (16, 4
), (4, 1), 0), buf52, reinterpret_tensor(buf55, (16, 4), (4, 1), 0),
reinterpret_tensor(buf57, (16, 4), (4, 1), 0), buf65,
reinterpret_tensor(buf68, (16, 4), (4, 1), 0), buf70,
reinterpret_tensor(buf71, (16, 4), (4, 1), 0), buf72,
reinterpret_tensor(buf73, (16, 16), (16, 1), 0), reinterpret_tensor
(buf78, (16, 4), (4, 1), 0), buf79, reinterpret_tensor(buf80, (16,
16), (16, 1), 0), primals_49, primals_47, primals_43, primals_41,
primals_37, reinterpret_tensor(buf66, (16, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf60, (16, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf61, (16, 4, 1), (4, 1, 4), 0), primals_35,
primals_34, primals_33, primals_27, reinterpret_tensor(buf48, (16,
1, 4), (4, 1, 1), 0), reinterpret_tensor(buf42, (16, 1, 4), (4, 1,
1), 0), reinterpret_tensor(buf43, (16, 4, 1), (4, 1, 4), 0),
primals_25, primals_24, primals_23, primals_17, reinterpret_tensor(
buf28, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf22, (16, 1,
4), (4, 1, 1), 0), reinterpret_tensor(buf23, (16, 4, 1), (4, 1, 4),
0), primals_15, primals_14, primals_13, primals_8,
reinterpret_tensor(buf12, (16, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 4), 0), primals_6,
primals_5, primals_4)
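# Hedged reading of the return tuple: buf75 and buf82 are the fused (z, x)
# outputs consumed by FeatureFusionNew.forward below; the long tail of weights
# and reinterpreted intermediates appears to be what Inductor saves for the
# backward pass of this '0_forward' graph.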
class Mlp(nn.Module):
""" Multilayer perceptron."""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
"""
Args:
x (torch.Tensor): (B, L, C), input tensor
Returns:
torch.Tensor: (B, L, C), output tensor
"""
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
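# Mlp is the standard transformer feed-forward block (Linear -> GELU -> Dropout
# -> Linear -> Dropout); with dim=4 and the default mlp_ratio=4.0 the hidden
# width is 16, which matches the (16, 4) fc1 weights asserted in call() above.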
class CrossAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
super(CrossAttention, self).__init__()
        assert dim % num_heads == 0, f'dim {dim} should be divisible by num_heads {num_heads}.'
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
if attn_pos_encoding_only:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.kv = nn.Linear(dim, 2 * dim, bias=qkv_bias)
else:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_pos_encoding_only = attn_pos_encoding_only
def forward(self, q, kv, q_ape, k_ape, attn_pos):
"""
Args:
q (torch.Tensor): (B, L_q, C)
kv (torch.Tensor): (B, L_kv, C)
q_ape (torch.Tensor | None): (1 or B, L_q, C), absolute positional encoding for q
k_ape (torch.Tensor | None): (1 or B, L_kv, C), absolute positional encoding for k
attn_pos (torch.Tensor | None): (1 or B, num_heads, L_q, L_kv), untied positional encoding
Returns:
torch.Tensor: (B, L_q, C)
"""
B, q_N, C = q.shape
kv_N = kv.shape[1]
if self.attn_pos_encoding_only:
assert q_ape is None and k_ape is None
            q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
            kv = self.kv(kv).reshape(B, kv_N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
            k, v = kv[0], kv[1]
        else:
            q = q + q_ape if q_ape is not None else q
            q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
            k = kv + k_ape if k_ape is not None else kv
            k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
            v = self.v(kv).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
attn = q @ k.transpose(-2, -1)
attn = attn * self.scale
if attn_pos is not None:
attn = attn + attn_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, q_N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class SelfAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
super(SelfAttention, self).__init__()
        assert dim % num_heads == 0, f'dim {dim} should be divisible by num_heads {num_heads}.'
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
if attn_pos_encoding_only:
self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias)
else:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_pos_encoding_only = attn_pos_encoding_only
def forward(self, x, q_ape, k_ape, attn_pos):
"""
Args:
x (torch.Tensor): (B, L, C)
q_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for q
k_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for k
attn_pos (torch.Tensor | None): (1 or B, num_heads, L, L), untied positional encoding
Returns:
torch.Tensor: (B, L, C)
"""
B, N, C = x.shape
if self.attn_pos_encoding_only:
assert q_ape is None and k_ape is None
            qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
            q, k, v = qkv[0], qkv[1], qkv[2]
        else:
            q = x + q_ape if q_ape is not None else x
            q = self.q(q).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
            k = x + k_ape if k_ape is not None else x
            k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
            v = self.v(x).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
attn = q @ k.transpose(-2, -1)
attn = attn * self.scale
if attn_pos is not None:
attn = attn + attn_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class FeatureFusionNew(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
        qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=nn.Identity(),
        act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_pos_encoding_only=False):
super(FeatureFusionNew, self).__init__()
self.z_norm1 = norm_layer(dim)
self.x_norm1 = norm_layer(dim)
self.z_self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale,
attn_drop, drop, attn_pos_encoding_only)
self.x_self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale,
attn_drop, drop, attn_pos_encoding_only)
self.z_norm2_1 = norm_layer(dim)
self.z_norm2_2 = norm_layer(dim)
self.x_norm2_1 = norm_layer(dim)
self.x_norm2_2 = norm_layer(dim)
self.z_x_cross_attention = CrossAttention(dim, num_heads, qkv_bias,
qk_scale, attn_drop, drop, attn_pos_encoding_only)
self.x_z_cross_attention = CrossAttention(dim, num_heads, qkv_bias,
qk_scale, attn_drop, drop, attn_pos_encoding_only)
mlp_hidden_dim = int(dim * mlp_ratio)
self.z_norm3 = norm_layer(dim)
self.x_norm3 = norm_layer(dim)
self.z_mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
self.x_mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
self.drop_path = drop_path
def forward(self, input_0, input_1, input_2, input_3, input_4, input_5):
primals_1 = self.z_norm1.weight
primals_2 = self.z_norm1.bias
primals_9 = self.x_norm1.weight
primals_10 = self.x_norm1.bias
primals_4 = self.z_self_attn.q.weight
primals_5 = self.z_self_attn.k.weight
primals_6 = self.z_self_attn.v.weight
primals_8 = self.z_self_attn.proj.weight
primals_11 = self.z_self_attn.proj.bias
primals_13 = self.x_self_attn.q.weight
primals_14 = self.x_self_attn.k.weight
primals_15 = self.x_self_attn.v.weight
primals_17 = self.x_self_attn.proj.weight
primals_18 = self.x_self_attn.proj.bias
primals_19 = self.z_norm2_1.weight
primals_20 = self.z_norm2_1.bias
primals_21 = self.z_norm2_2.weight
primals_22 = self.z_norm2_2.bias
primals_28 = self.x_norm2_1.weight
primals_29 = self.x_norm2_1.bias
primals_30 = self.x_norm2_2.weight
primals_31 = self.x_norm2_2.bias
primals_23 = self.z_x_cross_attention.q.weight
primals_24 = self.z_x_cross_attention.k.weight
primals_25 = self.z_x_cross_attention.v.weight
primals_27 = self.z_x_cross_attention.proj.weight
primals_32 = self.z_x_cross_attention.proj.bias
primals_33 = self.x_z_cross_attention.q.weight
primals_34 = self.x_z_cross_attention.k.weight
primals_35 = self.x_z_cross_attention.v.weight
primals_37 = self.x_z_cross_attention.proj.weight
primals_38 = self.x_z_cross_attention.proj.bias
primals_39 = self.z_norm3.weight
primals_40 = self.z_norm3.bias
primals_44 = self.x_norm3.weight
primals_45 = self.x_norm3.bias
primals_41 = self.z_mlp.fc1.weight
primals_42 = self.z_mlp.fc1.bias
primals_43 = self.z_mlp.fc2.weight
primals_46 = self.z_mlp.fc2.bias
primals_47 = self.x_mlp.fc1.weight
primals_48 = self.x_mlp.fc1.bias
primals_49 = self.x_mlp.fc2.weight
primals_50 = self.x_mlp.fc2.bias
primals_3 = input_0
primals_12 = input_1
primals_7 = input_2
primals_16 = input_3
primals_26 = input_4
primals_36 = input_5
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50])
return output[0], output[1]
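# FeatureFusionNew keeps the eager module's parameters but routes the forward
# pass through the Inductor-generated call(); only output[0] (z) and
# output[1] (x) are returned, mirroring the eager FeatureFusion.forward above.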
| zhangzhengde0225/SwinTrack | FeatureFusion | false | 16,842 | [
"MIT"
] | 143 | 526be17f8ef266cb924c6939bd8dda23e9b73249 | https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249 |
TargetQueryDecoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lx/clxvkfdpf6u56ymxjpefnu4p64uvb65dmq6dvbz7ljhx22fx2pbi.py
# Topologically Sorted Source Nodes: [layer_norm, q], Original ATen: [aten.native_layer_norm, aten.add]
# Source node to ATen node mapping:
# layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# q => add_2
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_4), kwargs = {})
triton_poi_fused_add_native_layer_norm_1 = async_compile.triton('triton_poi_fused_add_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + (x4), tmp8, xmask)
tl.store(out_ptr1 + (x4), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/xp/cxp3ouwpdhdlmipppq44wjaey2obmthzec7uqoddmpoigfmupxdx.py
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/66/c66khgqeijdhapvrfqd7e5uukduawcejvhaqiijbqtnzq6b7lcof.py
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_2 => exp
# Graph fragment:
# %mul_tensor_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 1), kwargs = {})
# %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_2, [-1], True), kwargs = {})
# %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_2, %amax_default_1), kwargs = {})
# %mul_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor_1, 1.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_3,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/zh/czh6tw7ngffcygnivwvcjex5edxy3ms4t27ymyn2hemxlpspxzq7.py
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_2 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/zq/czqeiybdb6mlnwo4hmrayt3c44g7hbps2ftgdd7x2mv3sr2mwjbn.py
# Topologically Sorted Source Nodes: [x_2, query, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_1 => var_mean_1
# query => add_5
# x_2 => add_4
# Graph fragment:
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_9), kwargs = {})
# %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_4), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_5, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (1))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (2))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (3))
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
tl.store(out_ptr1 + (x0), tmp40, xmask)
''', device_str='cuda')
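# Statistics pass for layer_norm_1: recomputes the residual
# query = input + (self_attn_out + proj_bias) with fully unrolled loads, then
# reduces the per-row mean and biased variance (sum / 4, no Bessel
# correction). The eps add and rsqrt are applied later, in
# triton_poi_fused_add_native_layer_norm_8.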
# kernel path: runs/run_shard_0/inductor_cache/a2/ca2af7xswnpfwl5qxbbls4go4o4ij4omes5uvtshgqgzlewwedv7.py
# Topologically Sorted Source Nodes: [layer_norm_2], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_2 => add_8, rsqrt_2, var_mean_2
# Graph fragment:
# %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_14, [1]), kwargs = {correction: 0, keepdim: True})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {})
# %rsqrt_2 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_8,), kwargs = {})
triton_poi_fused_native_layer_norm_6 = async_compile.triton('triton_poi_fused_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
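# LayerNorm statistics for the (4, 4) memory tensor: per-row mean and
# rsqrt(biased variance + 1e-05), where 1e-05 matches the default eps of
# nn.LayerNorm. A hedged eager sketch (helper name is illustrative, not part
# of the generated module):
def _layer_norm_stats_reference(x):
    # mean and rsqrt(var + eps) over the last dimension, biased variance
    mean = x.mean(dim=-1, keepdim=True)
    var = x.var(dim=-1, unbiased=False, keepdim=True)
    return mean, torch.rsqrt(var + 1e-05)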
# kernel path: runs/run_shard_0/inductor_cache/vj/cvj77z4rekblow6ccjhghniklijox2xi7yopqabibffi4i5kw7r5.py
# Topologically Sorted Source Nodes: [layer_norm_2, k_2], Original ATen: [aten.native_layer_norm, aten.add]
# Source node to ATen node mapping:
# k_2 => add_11
# layer_norm_2 => add_8, add_9, mul_5, mul_6, rsqrt_2, sub_3, var_mean_2
# Graph fragment:
# %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_14, [1]), kwargs = {correction: 0, keepdim: True})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {})
# %rsqrt_2 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_8,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_14, %getitem_5), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %rsqrt_2), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %primals_12), kwargs = {})
# %add_9 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %primals_13), kwargs = {})
# %add_11 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_9, %primals_16), kwargs = {})
triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + (x2), xmask)
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + (x2), tmp8, xmask)
tl.store(out_ptr1 + (x2), tmp10, xmask)
''', device_str='cuda')
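# Elementwise LayerNorm for the memory tensor using the mean/rsqrt pair from
# the kernel above, emitting two buffers in one pass: the normalized memory
# (out_ptr0) and k_2 = normalized memory + key positional encoding (out_ptr1).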
# kernel path: runs/run_shard_0/inductor_cache/ba/cbatzbmmpftbhakwf5ccqcz43ntsrmh4hzhlro4xztzk4jl3lykp.py
# Topologically Sorted Source Nodes: [x_2, query, layer_norm_1, q_2], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_1 => add_6, add_7, mul_3, mul_4, rsqrt_1, sub_2
# q_2 => add_10
# query => add_5
# x_2 => add_4
# Graph fragment:
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_9), kwargs = {})
# %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_4), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_6,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %getitem_3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_10), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_11), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %primals_4), kwargs = {})
triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x4 = (xindex // 4)
x5 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x4), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr7 + (x5), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tmp17 = tmp15 + tmp16
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
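# Fused consumer of the layer_norm_1 statistics: recomputes the residual add,
# normalizes with rsqrt(var + 1e-05), applies the affine weight/bias, and adds
# the query positional encoding, yielding q_2 = LayerNorm(query) + query_pos
# in a single pass.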
# kernel path: runs/run_shard_0/inductor_cache/du/cduko2a2wjufqof5k7dcobfmlh3spl62rgdgiel6bklf3wzd6de3.py
# Topologically Sorted Source Nodes: [attn_6], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_6 => div_1, exp_1, sum_2
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_25, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
triton_poi_fused__softmax_9 = async_compile.triton('triton_poi_fused__softmax_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_9(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tmp2 - tmp2
tmp4 = tmp3 * tmp1
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 / tmp5
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
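# Degenerate softmax: for this input shape the cross-attention key length
# collapses to 1, so x - amax(x) == 0, exp(0) == 1, and 1 / sum == 1; the
# kernel therefore overwrites its buffer with ones in place (in_out_ptr0 is
# both source and destination).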
# kernel path: runs/run_shard_0/inductor_cache/7n/c7nsuy663pmek7olaj7xnomqpsspowavb2zqwzrjkaz42y7dnrtc.py
# Topologically Sorted Source Nodes: [x_2, query, x_6, query_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# query => add_5
# query_1 => add_13
# x_2 => add_4
# x_6 => add_12
# Graph fragment:
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_9), kwargs = {})
# %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_4), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_31, %primals_20), kwargs = {})
# %add_13 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %add_12), kwargs = {})
triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + (x2), xmask)
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
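# Folds both residual connections of the decoder layer into one in-place add:
# query_1 = input + (self_attn_out + its proj bias)
#                 + (cross_attn_out + its proj bias).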
# kernel path: runs/run_shard_0/inductor_cache/73/c73z2hfes5izl473wn57vaku4rt2ae7swkdamlriywh5x5xt7g3z.py
# Topologically Sorted Source Nodes: [layer_norm_3], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_3 => add_14, add_15, mul_8, mul_9, rsqrt_3, sub_5, var_mean_3
# Graph fragment:
# %var_mean_3 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_13, [2]), kwargs = {correction: 0, keepdim: True})
# %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {})
# %rsqrt_3 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_14,), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_13, %getitem_7), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %rsqrt_3), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_8, %primals_21), kwargs = {})
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_9, %primals_22), kwargs = {})
triton_poi_fused_native_layer_norm_11 = async_compile.triton('triton_poi_fused_native_layer_norm_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
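# Elementwise stage of layer_norm_3: consumes a mean/rsqrt pair produced by
# triton_poi_fused_native_layer_norm_0 and applies the learned scale and
# shift.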
# kernel path: runs/run_shard_0/inductor_cache/fx/cfxiyfr3c2gao54ay6g5yclydzi2o2ptpuogh7nkfkixpwhktjkb.py
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# x_9 => add_16, erf, mul_10, mul_11, mul_12
# Graph fragment:
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_33, 0.5), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_33, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_11,), kwargs = {})
# %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_10, %add_16), kwargs = {})
triton_poi_fused_gelu_12 = async_compile.triton('triton_poi_fused_gelu_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_12(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
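# Exact (erf-based) GELU, matching torch.nn.functional.gelu's default:
# 0.5 * x * (1 + erf(x / sqrt(2))), with 0.7071067811865476 == 1/sqrt(2).
# A hedged eager sketch (helper name is illustrative, not part of the
# generated module):
def _gelu_reference(x):
    return 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))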
# kernel path: runs/run_shard_0/inductor_cache/a5/ca5s5goijithf2osquwwefbmfzpsgnhto5xw5por2nwj6p6al76g.py
# Topologically Sorted Source Nodes: [query_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# query_2 => add_17
# Graph fragment:
# %add_17 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_13, %view_35), kwargs = {})
triton_poi_fused_add_13 = async_compile.triton('triton_poi_fused_add_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_13(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
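# Final residual of the layer: query_2 = query_1 + (mlp_out + fc2 bias),
# accumulated in place over the MLP projection buffer.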
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4, 4), (4, 1))
assert_size_stride(primals_18, (4, 4), (4, 1))
assert_size_stride(primals_19, (4, 4), (4, 1))
assert_size_stride(primals_20, (4, ), (1, ))
assert_size_stride(primals_21, (4, ), (1, ))
assert_size_stride(primals_22, (4, ), (1, ))
assert_size_stride(primals_23, (16, 4), (4, 1))
assert_size_stride(primals_24, (16, ), (1, ))
assert_size_stride(primals_25, (4, 16), (16, 1))
assert_size_stride(primals_26, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm, q], Original ATen: [aten.native_layer_norm, aten.add]
triton_poi_fused_add_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, primals_4, buf2, buf3, 64, grid=grid(64), stream=stream0)
del primals_1
del primals_2
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf4, buf7, 16, 4, grid=grid(16, 4), stream=stream0)
buf8 = reinterpret_tensor(buf4, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf5, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf8, (16, 1, 4), (4, 0, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf9, buf10, 256, grid=grid(256), stream=stream0)
buf11 = reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf10, buf11, 256, grid=grid(256), stream=stream0)
buf12 = reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf6, buf12, 16, 4, grid=grid(16, 4), stream=stream0)
buf13 = reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf12, (16, 4, 1), (4, 1, 0), 0), out=buf13)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf13, buf14, 16, 4, grid=grid(16, 4), stream=stream0)
buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0); del buf13 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf15)
buf16 = buf1; del buf1 # reuse
buf17 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_2, query, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(primals_3, buf15, primals_9, buf16, buf17, 16, grid=grid(16), stream=stream0)
buf18 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf19 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm_2], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_6.run(primals_14, buf18, buf19, 4, grid=grid(4), stream=stream0)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm_2, k_2], Original ATen: [aten.native_layer_norm, aten.add]
triton_poi_fused_add_native_layer_norm_7.run(primals_14, buf18, buf19, primals_12, primals_13, primals_16, buf20, buf23, 16, grid=grid(16), stream=stream0)
del buf18
del buf19
del primals_12
del primals_13
del primals_16
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2, query, layer_norm_1, q_2], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_8.run(primals_3, buf15, primals_9, buf16, buf17, primals_10, primals_11, primals_4, buf21, 64, grid=grid(64), stream=stream0)
del primals_11
del primals_4
buf22 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf21, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf22)
buf24 = reinterpret_tensor(buf17, (4, 4), (4, 1), 0); del buf17 # reuse
# Topologically Sorted Source Nodes: [linear_5], Original ATen: [aten.mm]
extern_kernels.mm(buf23, reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf24)
buf25 = reinterpret_tensor(buf16, (4, 4), (4, 1), 0); del buf16 # reuse
# Topologically Sorted Source Nodes: [linear_6], Original ATen: [aten.mm]
extern_kernels.mm(buf20, reinterpret_tensor(primals_18, (4, 4), (1, 4), 0), out=buf25)
buf26 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_4], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf22, buf26, 16, 4, grid=grid(16, 4), stream=stream0)
buf27 = reinterpret_tensor(buf22, (16, 4, 1), (4, 1, 1), 0); del buf22 # reuse
# Topologically Sorted Source Nodes: [attn_4], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf26, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf24, (16, 1, 1), (1, 1, 1), 0), out=buf27)
buf28 = reinterpret_tensor(buf27, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf27 # reuse
# Topologically Sorted Source Nodes: [attn_6], Original ATen: [aten._softmax]
triton_poi_fused__softmax_9.run(buf28, 64, grid=grid(64), stream=stream0)
buf29 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf28, (16, 4, 1), (4, 1, 1), 0), reinterpret_tensor(buf25, (16, 1, 1), (1, 1, 1), 0), out=buf29)
buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf29, buf30, 16, 4, grid=grid(16, 4), stream=stream0)
buf31 = reinterpret_tensor(buf29, (16, 4), (4, 1), 0); del buf29 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf30, (16, 4), (4, 1), 0), reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), out=buf31)
buf32 = reinterpret_tensor(buf31, (4, 4, 4), (16, 4, 1), 0); del buf31 # reuse
# Topologically Sorted Source Nodes: [x_2, query, x_6, query_1], Original ATen: [aten.add]
triton_poi_fused_add_10.run(buf32, primals_3, buf15, primals_9, primals_20, 64, grid=grid(64), stream=stream0)
del primals_20
buf33 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf34 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_0.run(buf32, buf33, buf34, 16, grid=grid(16), stream=stream0)
buf35 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_11.run(buf32, buf33, buf34, primals_21, primals_22, buf35, 64, grid=grid(64), stream=stream0)
del buf33
del buf34
del primals_22
buf36 = reinterpret_tensor(buf10, (16, 16), (16, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_24, reinterpret_tensor(buf35, (16, 4), (4, 1), 0), reinterpret_tensor(primals_23, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf36)
del primals_24
buf37 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.gelu]
triton_poi_fused_gelu_12.run(buf36, buf37, 256, grid=grid(256), stream=stream0)
buf38 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf37, (16, 16), (16, 1), 0), reinterpret_tensor(primals_25, (16, 4), (1, 16), 0), out=buf38)
buf39 = reinterpret_tensor(buf38, (4, 4, 4), (16, 4, 1), 0); del buf38 # reuse
# Topologically Sorted Source Nodes: [query_2], Original ATen: [aten.add]
triton_poi_fused_add_13.run(buf39, buf32, primals_26, 64, grid=grid(64), stream=stream0)
del primals_26
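    # buf39 is the decoder layer output, shape (4, 4, 4); the remaining
    # entries are inputs and intermediates kept alive for the matching
    # backward graph of this '0_forward' compilation.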
return (buf39, primals_3, primals_9, primals_10, primals_14, primals_21, reinterpret_tensor(buf3, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf11, reinterpret_tensor(buf14, (16, 4), (4, 1), 0), buf15, buf20, reinterpret_tensor(buf21, (16, 4), (4, 1), 0), buf23, buf28, reinterpret_tensor(buf30, (16, 4), (4, 1), 0), buf32, reinterpret_tensor(buf35, (16, 4), (4, 1), 0), buf36, reinterpret_tensor(buf37, (16, 16), (16, 1), 0), primals_25, primals_23, primals_19, reinterpret_tensor(buf25, (16, 1, 1), (1, 1, 4), 0), reinterpret_tensor(buf26, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf24, (16, 1, 1), (1, 4, 1), 0), primals_18, primals_17, primals_15, primals_8, reinterpret_tensor(buf12, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf7, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 4), 0), primals_7, primals_6, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
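# Eager PyTorch definition of the model compiled above follows.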
import torch
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
class Mlp(nn.Module):
""" Multilayer perceptron."""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
"""
Args:
x (torch.Tensor): (B, L, C), input tensor
Returns:
torch.Tensor: (B, L, C), output tensor
"""
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class CrossAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
super(CrossAttention, self).__init__()
assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.'
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
if attn_pos_encoding_only:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.kv = nn.Linear(dim, 2 * dim, bias=qkv_bias)
else:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_pos_encoding_only = attn_pos_encoding_only
def forward(self, q, kv, q_ape, k_ape, attn_pos):
"""
Args:
q (torch.Tensor): (B, L_q, C)
kv (torch.Tensor): (B, L_kv, C)
q_ape (torch.Tensor | None): (1 or B, L_q, C), absolute positional encoding for q
k_ape (torch.Tensor | None): (1 or B, L_kv, C), absolute positional encoding for k
attn_pos (torch.Tensor | None): (1 or B, num_heads, L_q, L_kv), untied positional encoding
Returns:
torch.Tensor: (B, L_q, C)
"""
B, q_N, C = q.shape
kv_N = kv.shape[1]
if self.attn_pos_encoding_only:
assert q_ape is None and k_ape is None
            q = self.q(q).reshape(B, q_N, self.num_heads,
                C // self.num_heads).permute(0, 2, 1, 3)
            kv = self.kv(kv).reshape(B, kv_N, 2, self.num_heads,
                C // self.num_heads).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
else:
q = q + q_ape if q_ape is not None else q
            q = self.q(q).reshape(B, q_N, self.num_heads,
                C // self.num_heads).permute(0, 2, 1, 3)
            k = kv + k_ape if k_ape is not None else kv
            k = self.k(k).reshape(B, -1, self.num_heads,
                C // self.num_heads).permute(0, 2, 1, 3)
            v = self.v(kv).reshape(B, -1, self.num_heads,
                C // self.num_heads).permute(0, 2, 1, 3)
attn = q @ k.transpose(-2, -1)
attn = attn * self.scale
if attn_pos is not None:
attn = attn + attn_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, q_N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class SelfAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
super(SelfAttention, self).__init__()
assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.'
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
if attn_pos_encoding_only:
self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias)
else:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_pos_encoding_only = attn_pos_encoding_only
def forward(self, x, q_ape, k_ape, attn_pos):
"""
Args:
x (torch.Tensor): (B, L, C)
q_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for q
k_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for k
attn_pos (torch.Tensor | None): (1 or B, num_heads, L, L), untied positional encoding
Returns:
torch.Tensor: (B, L, C)
"""
B, N, C = x.shape
if self.attn_pos_encoding_only:
assert q_ape is None and k_ape is None
            qkv = self.qkv(x).reshape(B, N, 3, self.num_heads,
                C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
else:
q = x + q_ape if q_ape is not None else x
            q = self.q(q).reshape(B, N, self.num_heads,
                C // self.num_heads).permute(0, 2, 1, 3)
            k = x + k_ape if k_ape is not None else x
            k = self.k(k).reshape(B, -1, self.num_heads,
                C // self.num_heads).permute(0, 2, 1, 3)
            v = self.v(x).reshape(B, -1, self.num_heads,
                C // self.num_heads).permute(0, 2, 1, 3)
attn = q @ k.transpose(-2, -1)
attn = attn * self.scale
if attn_pos is not None:
attn = attn + attn_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class TargetQueryDecoderLayer(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=nn.Identity(),
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super(TargetQueryDecoderLayer, self).__init__()
self.norm_1 = norm_layer(dim)
self.self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale,
attn_drop, drop)
self.norm_2_query = norm_layer(dim)
self.norm_2_memory = norm_layer(dim)
self.cross_attn = CrossAttention(dim, num_heads, qkv_bias, qk_scale,
attn_drop, drop)
self.norm_3 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(dim, hidden_features=mlp_hidden_dim,
            act_layer=act_layer, drop=drop)
self.drop_path = drop_path
def forward(self, query, memory, query_pos, memory_pos):
"""
Args:
query (torch.Tensor): (B, num_queries, C)
memory (torch.Tensor): (B, L, C)
query_pos (torch.Tensor): (1 or B, num_queries, C)
memory_pos (torch.Tensor): (1 or B, L, C)
Returns:
torch.Tensor: (B, num_queries, C)
"""
query = query + self.drop_path(self.self_attn(self.norm_1(query),
query_pos, query_pos, None))
        query = query + self.drop_path(self.cross_attn(
            self.norm_2_query(query), self.norm_2_memory(memory),
            query_pos, memory_pos, None))
query = query + self.drop_path(self.mlp(self.norm_3(query)))
return query
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4]),
torch.rand([4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'num_heads': 4}]
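# A minimal usage sketch for the modules above (illustrative, not part of the
# original file); shapes mirror get_init_inputs()/get_inputs():
#   layer = TargetQueryDecoderLayer(dim=4, num_heads=4)
#   query, memory, query_pos, memory_pos = get_inputs()
#   out = layer(query, memory, query_pos, memory_pos)  # -> (4, 4, 4)
# The standalone (deduplicated) Triton kernels for the same graph follow.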
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional
import torch.utils.data
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex % 16
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + x4, tmp8, xmask)
tl.store(out_ptr1 + x4, tmp10, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr2 + 2)
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + 3)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
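# The kernel below computes LayerNorm statistics over a length-4 axis:
# out_ptr0 receives the mean and out_ptr1 receives rsqrt(var + 1e-5); the
# affine scale/shift is applied by a separate kernel.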
@triton.jit
def triton_poi_fused_native_layer_norm_6(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + x2, xmask)
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x4 = xindex // 4
x5 = xindex % 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x4, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr7 + x5, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tmp17 = tmp15 + tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
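# Degenerate softmax: the softmax axis has length 1 here (one key per query),
# so x - max(x) == 0 and exp(0) / exp(0) == 1.0 for every finite input; the
# kernel effectively fills the buffer with ones.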
@triton.jit
def triton_poi_fused__softmax_9(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tmp2 - tmp2
tmp4 = tmp3 * tmp1
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 / tmp5
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_11(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
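# Exact (erf-based) GELU: 0.5 * x * (1 + erf(x / sqrt(2))); the constant
# 0.7071067811865476 is 1 / sqrt(2).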
@triton.jit
def triton_poi_fused_gelu_12(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_13(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4, 4), (4, 1))
assert_size_stride(primals_18, (4, 4), (4, 1))
assert_size_stride(primals_19, (4, 4), (4, 1))
assert_size_stride(primals_20, (4,), (1,))
assert_size_stride(primals_21, (4,), (1,))
assert_size_stride(primals_22, (4,), (1,))
assert_size_stride(primals_23, (16, 4), (4, 1))
assert_size_stride(primals_24, (16,), (1,))
assert_size_stride(primals_25, (4, 16), (16, 1))
assert_size_stride(primals_26, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_1[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, primals_4, buf2, buf3, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf4, buf7, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf4, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf4
triton_poi_fused_clone_2[grid(16, 4)](buf5, buf8, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf8, (16, 1, 4), (4, 0, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(256)](buf9, buf10, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf11 = reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf9
triton_poi_fused__softmax_4[grid(256)](buf10, buf11, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf12 = reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf5
triton_poi_fused_clone_2[grid(16, 4)](buf6, buf12, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 1), 0)
del buf6
extern_kernels.bmm(reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf12, (16, 4, 1), (4, 1, 0), 0), out=buf13)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf13, buf14, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0)
del buf13
extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf15)
buf16 = buf1
del buf1
buf17 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_3, buf15,
primals_9, buf16, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf19 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_native_layer_norm_6[grid(4)](primals_14, buf18,
buf19, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_14,
buf18, buf19, primals_12, primals_13, primals_16, buf20, buf23,
16, XBLOCK=16, num_warps=1, num_stages=1)
del buf18
del buf19
del primals_12
del primals_13
del primals_16
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_8[grid(64)](primals_3, buf15,
primals_9, buf16, buf17, primals_10, primals_11, primals_4,
buf21, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_11
del primals_4
buf22 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf21, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf22)
buf24 = reinterpret_tensor(buf17, (4, 4), (4, 1), 0)
del buf17
extern_kernels.mm(buf23, reinterpret_tensor(primals_17, (4, 4), (1,
4), 0), out=buf24)
buf25 = reinterpret_tensor(buf16, (4, 4), (4, 1), 0)
del buf16
extern_kernels.mm(buf20, reinterpret_tensor(primals_18, (4, 4), (1,
4), 0), out=buf25)
buf26 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf22, buf26, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf27 = reinterpret_tensor(buf22, (16, 4, 1), (4, 1, 1), 0)
del buf22
extern_kernels.bmm(reinterpret_tensor(buf26, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf24, (16, 1, 1), (1, 1, 1), 0), out=buf27)
buf28 = reinterpret_tensor(buf27, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf27
triton_poi_fused__softmax_9[grid(64)](buf28, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf29 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf28, (16, 4, 1), (4, 1, 1),
0), reinterpret_tensor(buf25, (16, 1, 1), (1, 1, 1), 0), out=buf29)
buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf29, buf30, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf31 = reinterpret_tensor(buf29, (16, 4), (4, 1), 0)
del buf29
extern_kernels.mm(reinterpret_tensor(buf30, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), out=buf31)
buf32 = reinterpret_tensor(buf31, (4, 4, 4), (16, 4, 1), 0)
del buf31
triton_poi_fused_add_10[grid(64)](buf32, primals_3, buf15,
primals_9, primals_20, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_20
buf33 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf34 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_0[grid(16)](buf32, buf33, buf34,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf35 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_11[grid(64)](buf32, buf33, buf34,
primals_21, primals_22, buf35, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf33
del buf34
del primals_22
buf36 = reinterpret_tensor(buf10, (16, 16), (16, 1), 0)
del buf10
extern_kernels.addmm(primals_24, reinterpret_tensor(buf35, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_23, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf36)
del primals_24
buf37 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_gelu_12[grid(256)](buf36, buf37, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf38 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf37, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_25, (16, 4), (1, 16), 0), out=buf38)
buf39 = reinterpret_tensor(buf38, (4, 4, 4), (16, 4, 1), 0)
del buf38
triton_poi_fused_add_13[grid(64)](buf39, buf32, primals_26, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_26
return (buf39, primals_3, primals_9, primals_10, primals_14, primals_21,
reinterpret_tensor(buf3, (16, 4), (4, 1), 0), reinterpret_tensor(
buf2, (16, 4), (4, 1), 0), buf11, reinterpret_tensor(buf14, (16, 4),
(4, 1), 0), buf15, buf20, reinterpret_tensor(buf21, (16, 4), (4, 1),
0), buf23, buf28, reinterpret_tensor(buf30, (16, 4), (4, 1), 0),
buf32, reinterpret_tensor(buf35, (16, 4), (4, 1), 0), buf36,
reinterpret_tensor(buf37, (16, 16), (16, 1), 0), primals_25,
primals_23, primals_19, reinterpret_tensor(buf25, (16, 1, 1), (1, 1,
4), 0), reinterpret_tensor(buf26, (16, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf24, (16, 1, 1), (1, 4, 1), 0), primals_18,
primals_17, primals_15, primals_8, reinterpret_tensor(buf12, (16, 1,
4), (4, 1, 1), 0), reinterpret_tensor(buf7, (16, 1, 4), (4, 1, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 4), 0), primals_7,
primals_6, primals_5)
class Mlp(nn.Module):
""" Multilayer perceptron."""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
"""
Args:
x (torch.Tensor): (B, L, C), input tensor
Returns:
torch.Tensor: (B, L, C), output tensor
"""
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
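# Shape check (my addition, a minimal sketch; not part of the original repo):
# Mlp preserves the (B, L, C) layout and only widens C internally.
def _mlp_shape_check():
    mlp = Mlp(in_features=64, hidden_features=256, drop=0.1)
    y = mlp(torch.randn(2, 100, 64))
    assert y.shape == (2, 100, 64)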
class CrossAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
super(CrossAttention, self).__init__()
assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.'
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
if attn_pos_encoding_only:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.kv = nn.Linear(dim, 2 * dim, bias=qkv_bias)
else:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_pos_encoding_only = attn_pos_encoding_only
def forward(self, q, kv, q_ape, k_ape, attn_pos):
"""
Args:
q (torch.Tensor): (B, L_q, C)
kv (torch.Tensor): (B, L_kv, C)
q_ape (torch.Tensor | None): (1 or B, L_q, C), absolute positional encoding for q
k_ape (torch.Tensor | None): (1 or B, L_kv, C), absolute positional encoding for k
attn_pos (torch.Tensor | None): (1 or B, num_heads, L_q, L_kv), untied positional encoding
Returns:
torch.Tensor: (B, L_q, C)
"""
B, q_N, C = q.shape
kv_N = kv.shape[1]
if self.attn_pos_encoding_only:
assert q_ape is None and k_ape is None
q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
            kv = self.kv(kv).reshape(B, kv_N, 2, self.num_heads,
                C // self.num_heads).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
else:
q = q + q_ape if q_ape is not None else q
q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
k = kv + k_ape if k_ape is not None else kv
k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
v = self.v(kv).reshape(B, -1, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
attn = q @ k.transpose(-2, -1)
attn = attn * self.scale
if attn_pos is not None:
attn = attn + attn_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, q_N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
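# Cross-attention sketch (my addition; shapes are illustrative): the query
# and key/value sequences may differ in length.
def _cross_attn_shape_check():
    attn = CrossAttention(dim=64, num_heads=8)
    q = torch.randn(2, 10, 64)
    kv = torch.randn(2, 50, 64)
    out = attn(q, kv, None, None, None)
    assert out.shape == (2, 10, 64)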
class SelfAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0, attn_pos_encoding_only=False):
super(SelfAttention, self).__init__()
assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.'
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
if attn_pos_encoding_only:
self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias)
else:
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_pos_encoding_only = attn_pos_encoding_only
def forward(self, x, q_ape, k_ape, attn_pos):
"""
Args:
x (torch.Tensor): (B, L, C)
q_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for q
k_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for k
attn_pos (torch.Tensor | None): (1 or B, num_heads, L, L), untied positional encoding
Returns:
torch.Tensor: (B, L, C)
"""
B, N, C = x.shape
if self.attn_pos_encoding_only:
assert q_ape is None and k_ape is None
            qkv = self.qkv(x).reshape(B, N, 3, self.num_heads,
                C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
else:
q = x + q_ape if q_ape is not None else x
q = self.q(q).reshape(B, N, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
k = x + k_ape if k_ape is not None else x
k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
v = self.v(x).reshape(B, -1, self.num_heads, C // self.num_heads
).permute(0, 2, 1, 3)
attn = q @ k.transpose(-2, -1)
attn = attn * self.scale
if attn_pos is not None:
attn = attn + attn_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
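# The core computation above, written out:
#     out = softmax(q @ k^T * scale + attn_pos) @ v,
# with scale defaulting to head_dim ** -0.5.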
class TargetQueryDecoderLayerNew(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=nn.Identity(),
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super(TargetQueryDecoderLayerNew, self).__init__()
self.norm_1 = norm_layer(dim)
self.self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale,
attn_drop, drop)
self.norm_2_query = norm_layer(dim)
self.norm_2_memory = norm_layer(dim)
self.cross_attn = CrossAttention(dim, num_heads, qkv_bias, qk_scale,
attn_drop, drop)
self.norm_3 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(dim, hidden_features=mlp_hidden_dim, act_layer=
act_layer, drop=drop)
self.drop_path = drop_path
def forward(self, input_0, input_1, input_2, input_3):
primals_1 = self.norm_1.weight
primals_2 = self.norm_1.bias
primals_4 = self.self_attn.q.weight
primals_5 = self.self_attn.k.weight
primals_6 = self.self_attn.v.weight
primals_7 = self.self_attn.proj.weight
primals_9 = self.self_attn.proj.bias
primals_10 = self.norm_2_query.weight
primals_11 = self.norm_2_query.bias
primals_12 = self.norm_2_memory.weight
primals_13 = self.norm_2_memory.bias
primals_8 = self.cross_attn.q.weight
primals_14 = self.cross_attn.k.weight
primals_15 = self.cross_attn.v.weight
primals_16 = self.cross_attn.proj.weight
primals_20 = self.cross_attn.proj.bias
primals_21 = self.norm_3.weight
primals_22 = self.norm_3.bias
primals_23 = self.mlp.fc1.weight
primals_24 = self.mlp.fc1.bias
primals_25 = self.mlp.fc2.weight
primals_26 = self.mlp.fc2.bias
primals_3 = input_0
primals_17 = input_1
primals_18 = input_2
primals_19 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26])
return output[0]
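# Note (my addition): call() is shape-specialized; the assert_size_stride
# guards pin every tensor to the toy sizes used at trace time, so
# TargetQueryDecoderLayerNew.forward only accepts inputs matching those
# shapes.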
| zhangzhengde0225/SwinTrack | TargetQueryDecoderLayer | false | 16,843 | [
"MIT"
] | 143 | 526be17f8ef266cb924c6939bd8dda23e9b73249 | https://github.com/zhangzhengde0225/SwinTrack/tree/526be17f8ef266cb924c6939bd8dda23e9b73249 |
Actor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/pj/cpj23za7h3d7zizd7t4zvznbw7r7glhgaljjy5hg4agxtr5j2enq.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 238144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3721) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7n/c7nwygrb2z2hfgjjjll3mjp3fehfwiy5i6soguflwj2ccp6ciglx.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_2 => convolution_1
# x_3 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 53824
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3364) % 4
x0 = xindex % 3364
x4 = (xindex // 3364)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + (3392*x4)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ch/cchwcdlqa7ez3lyxon6yzdhm3wc5fqs7jjelloqsobomcahxc4ux.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_4 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_2 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 13456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 29
x1 = (xindex // 29) % 29
x4 = (xindex // 841)
x3 = (xindex // 3364)
x5 = xindex % 3364
tmp0 = tl.load(in_ptr0 + ((2*x0) + (116*x1) + (3392*x4)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (116*x1) + (3392*x4)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (58 + (2*x0) + (116*x1) + (3392*x4)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (59 + (2*x0) + (116*x1) + (3392*x4)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x5 + (3392*x3)), tmp6, xmask)
tl.store(out_ptr1 + (x5 + (3456*x3)), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/el/celwfn7c3fyh2qb3tjc64aj46d4th6gq2q2kw5yjjivhvlfeba5e.py
# Topologically Sorted Source Nodes: [conv2d_2, x_5], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_5 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 10816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 169) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6f/c6fnhazqeo5qw5gr6v7jt4y47qmetmowisz6nc67izzrkmp4sush.py
# Topologically Sorted Source Nodes: [conv2d_3, mu], Original ATen: [aten.convolution, aten.tanh]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# mu => tanh
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_tanh_4 = async_compile.triton('triton_poi_fused_convolution_tanh_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_tanh_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_tanh_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 300
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 25) % 3
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (16, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_3, (16, ), (1, ))
assert_size_stride(primals_4, (4, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (16, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (3, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(primals_9, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 61, 61), (59536, 3721, 61, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_3, 238144, grid=grid(238144), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 58, 58), (13456, 3364, 58, 1))
buf3 = empty_strided_cuda((4, 4, 58, 58), (13568, 3392, 58, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf2, primals_5, buf3, 53824, grid=grid(53824), stream=stream0)
del buf2
del primals_5
buf4 = empty_strided_cuda((4, 4, 29, 29), (3392, 841, 29, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 29, 29), (3456, 841, 29, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_2.run(buf3, buf4, buf5, 13456, grid=grid(13456), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 16, 13, 13), (2704, 169, 13, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_5], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf7, primals_7, 10816, grid=grid(10816), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 3, 5, 5), (75, 25, 5, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, mu], Original ATen: [aten.convolution, aten.tanh]
triton_poi_fused_convolution_tanh_4.run(buf9, primals_9, 300, grid=grid(300), stream=stream0)
del primals_9
return (buf9, primals_1, primals_2, primals_4, primals_6, primals_8, buf1, buf3, buf4, buf5, buf7, buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 16, 4, 4), (256, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((3, 16, 4, 4), (256, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Actor(nn.Module):
def __init__(self, kernel_size):
super(Actor, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=kernel_size)
self.conv2 = nn.Conv2d(16, 4, kernel_size=kernel_size)
self.pool1 = nn.MaxPool2d(2, 2)
self.conv1_ = nn.Conv2d(4, 16, kernel_size=kernel_size, stride=2)
self.conv2_ = nn.Conv2d(16, 3, kernel_size=kernel_size, stride=2)
def forward(self, inputs):
x = inputs
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = self.pool1(x)
x = F.relu(self.conv1_(x))
        mu = torch.tanh(self.conv2_(x))  # F.tanh is deprecated
return mu
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'kernel_size': 4}]
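# Usage sketch (my addition, not part of the original repo): with
# kernel_size=4 and a 64x64 RGB input the spatial size shrinks
# 64 -> 61 -> 58 -> 29 (pool) -> 13 -> 5, matching the shapes asserted in
# the compiled graph.
def demo_actor():
    actor = Actor(kernel_size=4)
    mu = actor(torch.rand(4, 3, 64, 64))
    assert mu.shape == (4, 3, 5, 5)
    return mu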
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 238144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3721 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
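# In the elementwise kernels here, xindex // (H * W) % C recovers the channel
# index (above: 61 * 61 = 3721 elements per channel, 16 channels), so the
# per-channel conv bias can be broadcast over all spatial positions.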
@triton.jit
def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 53824
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3364 % 4
x0 = xindex % 3364
x4 = xindex // 3364
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3392 * x4), tmp4, xmask)
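# 2x2 max pooling with argmax: the four loads below cover one pooling window
# (offsets 0, 1, 58, 59; the input row is 58 elements wide) and the int8
# output records which position won, as needed for the backward pass.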
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 13456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 29
x1 = xindex // 29 % 29
x4 = xindex // 841
x3 = xindex // 3364
x5 = xindex % 3364
tmp0 = tl.load(in_ptr0 + (2 * x0 + 116 * x1 + 3392 * x4), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 116 * x1 + 3392 * x4), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (58 + 2 * x0 + 116 * x1 + 3392 * x4), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (59 + 2 * x0 + 116 * x1 + 3392 * x4), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x5 + 3392 * x3), tmp6, xmask)
tl.store(out_ptr1 + (x5 + 3456 * x3), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 10816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 169 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_tanh_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 300
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 25 % 3
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (16, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (4, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (16, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (3, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(primals_9, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 61, 61), (59536, 3721, 61, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(238144)](buf1, primals_3,
238144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 58, 58), (13456, 3364, 58, 1))
buf3 = empty_strided_cuda((4, 4, 58, 58), (13568, 3392, 58, 1),
torch.float32)
triton_poi_fused_convolution_relu_1[grid(53824)](buf2, primals_5,
buf3, 53824, XBLOCK=256, num_warps=4, num_stages=1)
del buf2
del primals_5
        buf4 = empty_strided_cuda((4, 4, 29, 29), (3392, 841, 29, 1),
            torch.float32)
        buf5 = empty_strided_cuda((4, 4, 29, 29), (3456, 841, 29, 1),
            torch.int8)
triton_poi_fused_max_pool2d_with_indices_2[grid(13456)](buf3, buf4,
buf5, 13456, XBLOCK=128, num_warps=4, num_stages=1)
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 16, 13, 13), (2704, 169, 13, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_3[grid(10816)](buf7, primals_7,
10816, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 3, 5, 5), (75, 25, 5, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_tanh_4[grid(300)](buf9, primals_9, 300,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
return (buf9, primals_1, primals_2, primals_4, primals_6, primals_8,
buf1, buf3, buf4, buf5, buf7, buf9)
class ActorNew(nn.Module):
def __init__(self, kernel_size):
super(ActorNew, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=kernel_size)
self.conv2 = nn.Conv2d(16, 4, kernel_size=kernel_size)
self.pool1 = nn.MaxPool2d(2, 2)
self.conv1_ = nn.Conv2d(4, 16, kernel_size=kernel_size, stride=2)
self.conv2_ = nn.Conv2d(16, 3, kernel_size=kernel_size, stride=2)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv1_.weight
primals_7 = self.conv1_.bias
primals_8 = self.conv2_.weight
primals_9 = self.conv2_.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| zwc662/SequentialAttack | Actor | false | 16,844 | [
"MIT"
] | 116 | 677b19c51ea76d794939ee126fccd75ffa0e6fe6 | https://github.com/zwc662/SequentialAttack/tree/677b19c51ea76d794939ee126fccd75ffa0e6fe6 |
StdConv2dSame | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xs/cxs2a7zwcw5yxvn445xldhvii7772mtsthpxnfawxoahvyf3vtaj.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# x => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_2, [1, 2, 1, 2], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 7) % 7
x0 = xindex % 7
x2 = (xindex // 49)
x4 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4), tmp11, xmask)
''', device_str='cuda')
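# The kernel above materializes F.pad(x, [1, 2, 1, 2]): each 7x7 output pixel
# maps back to input coordinate (x0 - 1, x1 - 1), and out-of-range reads fall
# back to 0.0 via the masked load.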
# kernel path: runs/run_shard_0/inductor_cache/75/c75nb7a2vxv7rlsqcedi6uzl4pt7t4fprq3eyxtv6vohg4c3v5oj.py
# Topologically Sorted Source Nodes: [std_mean_1, sub_1, add_1, weight_1], Original ATen: [aten.std_mean, aten.sub, aten.add, aten.div]
# Source node to ATen node mapping:
# add_1 => add_1
# std_mean_1 => sqrt_1, var_mean_1
# sub_1 => sub_1
# weight_1 => div_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1, 2, 3]), kwargs = {correction: 0.0, keepdim: True})
# %sqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%getitem_2,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt_1, 1e-05), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %add_1), kwargs = {})
triton_per_fused_add_div_std_mean_sub_1 = async_compile.triton('triton_per_fused_add_div_std_mean_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_std_mean_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_std_mean_sub_1(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 64.0
tmp18 = tmp16 / tmp17
tmp19 = libdevice.sqrt(tmp18)
tmp20 = tmp0 - tmp10
tmp21 = 1e-05
tmp22 = tmp19 + tmp21
tmp23 = tmp20 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp19, xmask)
tl.store(out_ptr1 + (r1 + (64*x0)), tmp23, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(primals_2, buf0, 784, grid=grid(784), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf4 = reinterpret_tensor(buf2, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [std_mean_1, sub_1, add_1, weight_1], Original ATen: [aten.std_mean, aten.sub, aten.add, aten.div]
triton_per_fused_add_div_std_mean_sub_1.run(buf4, primals_1, buf5, 4, 64, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf0, buf5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
return (buf6, primals_1, buf0, buf4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.distributed
def get_same_padding(x: 'int', k: 'int', s: 'int', d: 'int'):
return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)
def pad_same(x, k, s, d=(1, 1), value=0):
ih, iw = x.size()[-2:]
pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw,
k[1], s[1], d[1])
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h -
pad_h // 2], value=value)
return x
class StdConv2dSame(nn.Conv2d):
"""Conv2d with Weight Standardization. TF compatible SAME padding. Used for ViT Hybrid model.
Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` -
https://arxiv.org/abs/1903.10520v2
"""
def __init__(self, in_channel, out_channels, kernel_size, stride=1,
dilation=1, groups=1, bias=False, eps=1e-05):
super().__init__(in_channel, out_channels, kernel_size, stride=
stride, padding=0, dilation=dilation, groups=groups, bias=bias)
self.eps = eps
def get_weight(self):
std, mean = torch.std_mean(self.weight, dim=[1, 2, 3], keepdim=True,
unbiased=False)
weight = (self.weight - mean) / (std + self.eps)
return weight
def forward(self, x):
x = pad_same(x, self.get_weight().shape[-2:], self.stride, self.
dilation)
return F.conv2d(x, self.get_weight(), self.bias, self.stride, (0, 0
), self.dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channels': 4, 'kernel_size': 4}]
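if __name__ == "__main__":
    # Illustrative sanity check (not part of the original source): for the
    # 4x4 inputs above with kernel 4, stride 1, dilation 1, get_same_padding
    # gives max((4 - 1)*1 + (4 - 1)*1 + 1 - 4, 0) = 3, so pad_same produces
    # a 7x7 map -- the same (4, 4, 7, 7) buffer the compiled
    # constant_pad_nd kernel allocates earlier in this record.
    x = get_inputs()[0]
    y = pad_same(x, k=(4, 4), s=(1, 1))
    assert y.shape[-2:] == (7, 7)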
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 7 % 7
x0 = xindex % 7
x2 = xindex // 49
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
other=0.0)
tl.store(out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_per_fused_add_div_std_mean_sub_1(in_out_ptr0, in_ptr0, out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 64.0
tmp18 = tmp16 / tmp17
tmp19 = libdevice.sqrt(tmp18)
tmp20 = tmp0 - tmp10
tmp21 = 1e-05
tmp22 = tmp19 + tmp21
tmp23 = tmp20 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp19, xmask)
tl.store(out_ptr1 + (r1 + 64 * x0), tmp23, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(784)](primals_2, buf0, 784,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf4 = reinterpret_tensor(buf2, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_add_div_std_mean_sub_1[grid(4)](buf4, primals_1,
buf5, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf6 = extern_kernels.convolution(buf0, buf5, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
return buf6, primals_1, buf0, buf4, buf5
def get_same_padding(x: 'int', k: 'int', s: 'int', d: 'int'):
return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)
def pad_same(x, k, s, d=(1, 1), value=0):
ih, iw = x.size()[-2:]
pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw,
k[1], s[1], d[1])
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h -
pad_h // 2], value=value)
return x
class StdConv2dSameNew(nn.Conv2d):
"""Conv2d with Weight Standardization. TF compatible SAME padding. Used for ViT Hybrid model.
Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` -
https://arxiv.org/abs/1903.10520v2
"""
def __init__(self, in_channel, out_channels, kernel_size, stride=1,
dilation=1, groups=1, bias=False, eps=1e-05):
super().__init__(in_channel, out_channels, kernel_size, stride=
stride, padding=0, dilation=dilation, groups=groups, bias=bias)
self.eps = eps
def get_weight(self):
std, mean = torch.std_mean(self.weight, dim=[1, 2, 3], keepdim=True,
unbiased=False)
weight = (self.weight - mean) / (std + self.eps)
return weight
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
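if __name__ == "__main__" and torch.cuda.is_available():
    # Illustrative smoke test (not part of the original source): `call`
    # hard-wires cuda:0, so a CUDA device is required. The output shape is
    # guaranteed by the assert_size_stride check on buf6 inside `call`.
    m = StdConv2dSameNew(in_channel=4, out_channels=4, kernel_size=4).cuda()
    y = m(torch.rand(4, 4, 4, 4, device='cuda'))
    assert y.shape == (4, 4, 4, 4)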
| ziniuwan/maed | StdConv2dSame | false | 16,845 | [
"MIT"
] | 145 | 9e1f1c37eba81da86c8d9c62dc9be41a01abff5b | https://github.com/ziniuwan/maed/tree/9e1f1c37eba81da86c8d9c62dc9be41a01abff5b |
MDNHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/oh/cohtdusl7z6f6qrzdvtdzv3eg7l7omf2dsjnhzs2htjrarc47o5n.py
# Topologically Sorted Source Nodes: [mul, std], Original ATen: [aten.mul, aten.exp]
# Source node to ATen node mapping:
# mul => mul
# std => exp
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_8, 0.5), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
triton_poi_fused_exp_mul_0 = async_compile.triton('triton_poi_fused_exp_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tl_math.exp(tmp4)
tl.store(in_out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (16, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf2)
del primals_6
buf3 = reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [mul, std], Original ATen: [aten.mul, aten.exp]
stream0 = get_raw_stream(0)
triton_poi_fused_exp_mul_0.run(buf3, primals_7, 1024, grid=grid(1024), stream=stream0)
del primals_7
return (reinterpret_tensor(buf0, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (64, 4, 4), (16, 4, 1), 0), buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import pickle
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
from torch.distributions import Categorical
from torch.nn.utils import vector_to_parameters
from torch.nn.utils import parameters_to_vector
def ortho_init(module, nonlinearity=None, weight_scale=1.0, constant_bias=0.0):
"""Applies orthogonal initialization for the parameters of a given module.
Args:
module (nn.Module): A module to apply orthogonal initialization over its parameters.
        nonlinearity (str, optional): Nonlinearity that follows the module's forward pass. When nonlinearity
is not ``None``, the gain will be calculated and :attr:`weight_scale` will be ignored.
Default: ``None``
weight_scale (float, optional): Scaling factor to initialize the weight. Ignored when
:attr:`nonlinearity` is not ``None``. Default: 1.0
constant_bias (float, optional): Constant value to initialize the bias. Default: 0.0
.. note::
        Currently, the only supported :attr:`module` types are elementary neural network layers, e.g.
        nn.Linear, nn.Conv2d, nn.LSTM. Submodules are not supported.
Example::
>>> a = nn.Linear(2, 3)
>>> ortho_init(a)
"""
if nonlinearity is not None:
gain = nn.init.calculate_gain(nonlinearity)
else:
gain = weight_scale
if isinstance(module, (nn.RNNBase, nn.RNNCellBase)):
for name, param in module.named_parameters():
if 'weight_' in name:
nn.init.orthogonal_(param, gain=gain)
elif 'bias_' in name:
nn.init.constant_(param, constant_bias)
else:
nn.init.orthogonal_(module.weight, gain=gain)
nn.init.constant_(module.bias, constant_bias)
class MDNHead(Module):
def __init__(self, in_features, out_features, num_density, **kwargs):
super().__init__(**kwargs)
self.in_features = in_features
self.out_features = out_features
self.num_density = num_density
self.pi_head = nn.Linear(in_features, out_features * num_density)
ortho_init(self.pi_head, weight_scale=0.01, constant_bias=0.0)
self.mean_head = nn.Linear(in_features, out_features * num_density)
ortho_init(self.mean_head, weight_scale=0.01, constant_bias=0.0)
self.logvar_head = nn.Linear(in_features, out_features * num_density)
ortho_init(self.logvar_head, weight_scale=0.01, constant_bias=0.0)
def forward(self, x):
logit_pi = self.pi_head(x).view(-1, self.num_density, self.out_features
)
mean = self.mean_head(x).view(-1, self.num_density, self.out_features)
logvar = self.logvar_head(x).view(-1, self.num_density, self.
out_features)
std = torch.exp(0.5 * logvar)
return logit_pi, mean, std
def loss(self, logit_pi, mean, std, target):
"""Calculate the MDN loss function.
The loss function (negative log-likelihood) is defined by:
.. math::
L = -\\frac{1}{N}\\sum_{n=1}^{N}\\ln \\left( \\sum_{k=1}^{K}\\prod_{d=1}^{D} \\pi_{k}(x_{n, d})
\\mathcal{N}\\left( \\mu_k(x_{n, d}), \\sigma_k(x_{n,d}) \\right) \\right)
For better numerical stability, we could use log-scale:
.. math::
L = -\\frac{1}{N}\\sum_{n=1}^{N}\\ln \\left( \\sum_{k=1}^{K}\\exp \\left\\{ \\sum_{d=1}^{D}
\\ln\\pi_{k}(x_{n, d}) + \\ln\\mathcal{N}\\left( \\mu_k(x_{n, d}), \\sigma_k(x_{n,d})
\\right) \\right\\} \\right)
.. note::
        One should always use the second formula via the log-sum-exp trick. The first formula
        is numerically unstable, resulting in +/- ``Inf`` and ``NaN`` errors.
The log-sum-exp trick is defined by
.. math::
\\log\\sum_{i=1}^{N}\\exp(x_i) = a + \\log\\sum_{i=1}^{N}\\exp(x_i - a)
where :math:`a = \\max_i(x_i)`
Args:
logit_pi (Tensor): the logit of mixing coefficients, shape [N, K, D]
mean (Tensor): mean of Gaussian mixtures, shape [N, K, D]
std (Tensor): standard deviation of Gaussian mixtures, shape [N, K, D]
target (Tensor): target tensor, shape [N, D]
Returns:
Tensor: calculated loss
"""
target = target.unsqueeze(1)
log_pi = F.log_softmax(logit_pi, dim=1)
dist = Normal(mean, std)
log_probs = dist.log_prob(target)
joint_log_probs = torch.sum(log_pi + log_probs, dim=-1, keepdim=False)
loss = torch.logsumexp(joint_log_probs, dim=-1, keepdim=False)
loss = -loss.mean(0)
return loss
def sample(self, logit_pi, mean, std, tau=1.0):
"""Sample from Gaussian mixtures using reparameterization trick.
- Firstly sample categorically over mixing coefficients to determine a specific Gaussian
- Then sample from selected Gaussian distribution
Args:
logit_pi (Tensor): the logit of mixing coefficients, shape [N, K, D]
mean (Tensor): mean of Gaussian mixtures, shape [N, K, D]
std (Tensor): standard deviation of Gaussian mixtures, shape [N, K, D]
            tau (float): temperature used during sampling; it controls uncertainty.
* If :math:`\\tau > 1`: increase uncertainty
* If :math:`\\tau < 1`: decrease uncertainty
Returns:
Tensor: sampled data with shape [N, D]
"""
N, K, D = logit_pi.shape
pi = F.softmax(logit_pi / tau, dim=1)
pi = pi.permute(0, 2, 1).view(-1, K)
mean = mean.permute(0, 2, 1).view(-1, K)
std = std.permute(0, 2, 1).view(-1, K)
pi_samples = Categorical(pi).sample()
mean = mean[torch.arange(N * D), pi_samples]
std = std[torch.arange(N * D), pi_samples]
eps = torch.randn_like(std)
samples = mean + eps * std * np.sqrt(tau)
samples = samples.view(N, D)
return samples
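# NOTE: this wrapper shadows the `Module` imported from torch.nn at the top of
# the file, but it is defined after MDNHead, so MDNHead above inherits
# torch.nn.Module rather than this wrapper.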
class Module(nn.Module):
"""Wrap PyTorch nn.module to provide more helper functions. """
def __init__(self, **kwargs):
super().__init__()
for key, val in kwargs.items():
self.__setattr__(key, val)
@property
def num_params(self):
"""Returns the total number of parameters in the neural network. """
return sum(param.numel() for param in self.parameters())
@property
def num_trainable_params(self):
"""Returns the total number of trainable parameters in the neural network."""
return sum(param.numel() for param in self.parameters() if param.
requires_grad)
@property
def num_untrainable_params(self):
"""Returns the total number of untrainable parameters in the neural network. """
return sum(param.numel() for param in self.parameters() if not
param.requires_grad)
def to_vec(self):
"""Returns the network parameters as a single flattened vector. """
return parameters_to_vector(parameters=self.parameters())
def from_vec(self, x):
"""Set the network parameters from a single flattened vector.
Args:
x (Tensor): A single flattened vector of the network parameters with consistent size.
"""
vector_to_parameters(vec=x, parameters=self.parameters())
def save(self, f):
"""Save the network parameters to a file.
It complies with the `recommended approach for saving a model in PyTorch documentation`_.
.. note::
It uses the highest pickle protocol to serialize the network parameters.
Args:
f (str): file path.
.. _recommended approach for saving a model in PyTorch documentation:
https://pytorch.org/docs/master/notes/serialization.html#best-practices
"""
torch.save(obj=self.state_dict(), f=f, pickle_protocol=pickle.
HIGHEST_PROTOCOL)
def load(self, f):
"""Load the network parameters from a file.
It complies with the `recommended approach for saving a model in PyTorch documentation`_.
Args:
f (str): file path.
.. _recommended approach for saving a model in PyTorch documentation:
https://pytorch.org/docs/master/notes/serialization.html#best-practices
"""
self.load_state_dict(torch.load(f))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4, 'num_density': 4}]
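if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original source). With the
    # shapes from get_inputs/get_init_inputs, forward flattens the input to
    # N = 4*4*4 = 64 rows, so logit_pi, mean and std are all [64, 4, 4].
    head = MDNHead(in_features=4, out_features=4, num_density=4)
    logit_pi, mean, std = head(torch.rand(4, 4, 4, 4))
    target = torch.rand(64, 4)                       # [N, D]
    nll = head.loss(logit_pi, mean, std, target)     # scalar NLL
    draws = head.sample(logit_pi, mean, std, tau=1.0)
    assert nll.dim() == 0 and draws.shape == (64, 4)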
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
import pickle
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
from torch.distributions import Categorical
from torch.nn.utils import vector_to_parameters
from torch.nn.utils import parameters_to_vector
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_exp_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tl_math.exp(tmp4)
tl.store(in_out_ptr0 + x2, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4),
0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf2)
del primals_6
buf3 = reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0)
del buf2
get_raw_stream(0)
triton_poi_fused_exp_mul_0[grid(1024)](buf3, primals_7, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
return reinterpret_tensor(buf0, (64, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf1, (64, 4, 4), (16, 4, 1), 0
), buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf3
def ortho_init(module, nonlinearity=None, weight_scale=1.0, constant_bias=0.0):
"""Applies orthogonal initialization for the parameters of a given module.
Args:
module (nn.Module): A module to apply orthogonal initialization over its parameters.
        nonlinearity (str, optional): Nonlinearity that follows the module's forward pass. When nonlinearity
is not ``None``, the gain will be calculated and :attr:`weight_scale` will be ignored.
Default: ``None``
weight_scale (float, optional): Scaling factor to initialize the weight. Ignored when
:attr:`nonlinearity` is not ``None``. Default: 1.0
constant_bias (float, optional): Constant value to initialize the bias. Default: 0.0
.. note::
        Currently, the only supported :attr:`module` types are elementary neural network layers, e.g.
        nn.Linear, nn.Conv2d, nn.LSTM. Submodules are not supported.
Example::
>>> a = nn.Linear(2, 3)
>>> ortho_init(a)
"""
if nonlinearity is not None:
gain = nn.init.calculate_gain(nonlinearity)
else:
gain = weight_scale
if isinstance(module, (nn.RNNBase, nn.RNNCellBase)):
for name, param in module.named_parameters():
if 'weight_' in name:
nn.init.orthogonal_(param, gain=gain)
elif 'bias_' in name:
nn.init.constant_(param, constant_bias)
else:
nn.init.orthogonal_(module.weight, gain=gain)
nn.init.constant_(module.bias, constant_bias)
class MDNHeadNew(Module):
def __init__(self, in_features, out_features, num_density, **kwargs):
super().__init__(**kwargs)
self.in_features = in_features
self.out_features = out_features
self.num_density = num_density
self.pi_head = nn.Linear(in_features, out_features * num_density)
ortho_init(self.pi_head, weight_scale=0.01, constant_bias=0.0)
self.mean_head = nn.Linear(in_features, out_features * num_density)
ortho_init(self.mean_head, weight_scale=0.01, constant_bias=0.0)
self.logvar_head = nn.Linear(in_features, out_features * num_density)
ortho_init(self.logvar_head, weight_scale=0.01, constant_bias=0.0)
def loss(self, logit_pi, mean, std, target):
"""Calculate the MDN loss function.
The loss function (negative log-likelihood) is defined by:
.. math::
L = -\\frac{1}{N}\\sum_{n=1}^{N}\\ln \\left( \\sum_{k=1}^{K}\\prod_{d=1}^{D} \\pi_{k}(x_{n, d})
\\mathcal{N}\\left( \\mu_k(x_{n, d}), \\sigma_k(x_{n,d}) \\right) \\right)
For better numerical stability, we could use log-scale:
.. math::
L = -\\frac{1}{N}\\sum_{n=1}^{N}\\ln \\left( \\sum_{k=1}^{K}\\exp \\left\\{ \\sum_{d=1}^{D}
\\ln\\pi_{k}(x_{n, d}) + \\ln\\mathcal{N}\\left( \\mu_k(x_{n, d}), \\sigma_k(x_{n,d})
\\right) \\right\\} \\right)
.. note::
        One should always use the second formula via the log-sum-exp trick. The first formula
        is numerically unstable, resulting in +/- ``Inf`` and ``NaN`` errors.
The log-sum-exp trick is defined by
.. math::
\\log\\sum_{i=1}^{N}\\exp(x_i) = a + \\log\\sum_{i=1}^{N}\\exp(x_i - a)
where :math:`a = \\max_i(x_i)`
Args:
logit_pi (Tensor): the logit of mixing coefficients, shape [N, K, D]
mean (Tensor): mean of Gaussian mixtures, shape [N, K, D]
std (Tensor): standard deviation of Gaussian mixtures, shape [N, K, D]
target (Tensor): target tensor, shape [N, D]
Returns:
Tensor: calculated loss
"""
target = target.unsqueeze(1)
log_pi = F.log_softmax(logit_pi, dim=1)
dist = Normal(mean, std)
log_probs = dist.log_prob(target)
joint_log_probs = torch.sum(log_pi + log_probs, dim=-1, keepdim=False)
loss = torch.logsumexp(joint_log_probs, dim=-1, keepdim=False)
loss = -loss.mean(0)
return loss
def sample(self, logit_pi, mean, std, tau=1.0):
"""Sample from Gaussian mixtures using reparameterization trick.
- Firstly sample categorically over mixing coefficients to determine a specific Gaussian
- Then sample from selected Gaussian distribution
Args:
logit_pi (Tensor): the logit of mixing coefficients, shape [N, K, D]
mean (Tensor): mean of Gaussian mixtures, shape [N, K, D]
std (Tensor): standard deviation of Gaussian mixtures, shape [N, K, D]
            tau (float): temperature used during sampling; it controls uncertainty.
* If :math:`\\tau > 1`: increase uncertainty
* If :math:`\\tau < 1`: decrease uncertainty
Returns:
Tensor: sampled data with shape [N, D]
"""
N, K, D = logit_pi.shape
pi = F.softmax(logit_pi / tau, dim=1)
pi = pi.permute(0, 2, 1).view(-1, K)
mean = mean.permute(0, 2, 1).view(-1, K)
std = std.permute(0, 2, 1).view(-1, K)
pi_samples = Categorical(pi).sample()
mean = mean[torch.arange(N * D), pi_samples]
std = std[torch.arange(N * D), pi_samples]
eps = torch.randn_like(std)
samples = mean + eps * std * np.sqrt(tau)
samples = samples.view(N, D)
return samples
def forward(self, input_0):
primals_1 = self.pi_head.weight
primals_2 = self.pi_head.bias
primals_4 = self.mean_head.weight
primals_5 = self.mean_head.bias
primals_6 = self.logvar_head.weight
primals_7 = self.logvar_head.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1], output[2]
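# NOTE: as in the eager file above, this wrapper is defined after MDNHeadNew,
# so MDNHeadNew inherits the `Module` imported from torch.nn, not this class.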
class Module(nn.Module):
"""Wrap PyTorch nn.module to provide more helper functions. """
def __init__(self, **kwargs):
super().__init__()
for key, val in kwargs.items():
self.__setattr__(key, val)
@property
def num_params(self):
"""Returns the total number of parameters in the neural network. """
return sum(param.numel() for param in self.parameters())
@property
def num_trainable_params(self):
"""Returns the total number of trainable parameters in the neural network."""
return sum(param.numel() for param in self.parameters() if param.
requires_grad)
@property
def num_untrainable_params(self):
"""Returns the total number of untrainable parameters in the neural network. """
return sum(param.numel() for param in self.parameters() if not
param.requires_grad)
def to_vec(self):
"""Returns the network parameters as a single flattened vector. """
return parameters_to_vector(parameters=self.parameters())
def from_vec(self, x):
"""Set the network parameters from a single flattened vector.
Args:
x (Tensor): A single flattened vector of the network parameters with consistent size.
"""
vector_to_parameters(vec=x, parameters=self.parameters())
def save(self, f):
"""Save the network parameters to a file.
It complies with the `recommended approach for saving a model in PyTorch documentation`_.
.. note::
It uses the highest pickle protocol to serialize the network parameters.
Args:
f (str): file path.
.. _recommended approach for saving a model in PyTorch documentation:
https://pytorch.org/docs/master/notes/serialization.html#best-practices
"""
torch.save(obj=self.state_dict(), f=f, pickle_protocol=pickle.
HIGHEST_PROTOCOL)
def load(self, f):
"""Load the network parameters from a file.
It complies with the `recommended approach for saving a model in PyTorch documentation`_.
Args:
f (str): file path.
.. _recommended approach for saving a model in PyTorch documentation:
https://pytorch.org/docs/master/notes/serialization.html#best-practices
"""
self.load_state_dict(torch.load(f))
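if __name__ == "__main__" and torch.cuda.is_available():
    # Illustrative smoke test (not part of the original source): the
    # compiled `call` pins everything to cuda:0. Shapes mirror the eager
    # MDNHead: three [64, 4, 4] tensors for logit_pi, mean and std.
    head = MDNHeadNew(in_features=4, out_features=4, num_density=4).cuda()
    logit_pi, mean, std = head(torch.rand(4, 4, 4, 4, device='cuda'))
    assert logit_pi.shape == mean.shape == std.shape == (64, 4, 4)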
| zuoxingdong/lagom | MDNHead | false | 16,846 | [
"MIT"
] | 383 | 3b6710804dbc79c6dffb369ac87c68f4055ab6cd | https://github.com/zuoxingdong/lagom/tree/3b6710804dbc79c6dffb369ac87c68f4055ab6cd |
_ASPPModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/vt/cvtlwsahn5oxfhga3dwjdswt3bhkvbh4zkaufpwgeopelblrwpnw.py
# Topologically Sorted Source Nodes: [pow_1, sum_1, add, pow_2], Original ATen: [aten.pow, aten.sum, aten.add]
# Source node to ATen node mapping:
# add => add
# pow_1 => pow_1
# pow_2 => pow_2
# sum_1 => sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2, 3], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-05), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.5), kwargs = {})
triton_per_fused_add_pow_sum_0 = async_compile.triton('triton_per_fused_add_pow_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_pow_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_pow_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.sqrt(tmp7)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/bk/cbk3oabqcrqeiz2rnrvk4rxkdmk4ef46ye5mdaspijuw6pzegair.py
# Topologically Sorted Source Nodes: [embedding, pow_3, mean, add_1, pow_4], Original ATen: [aten.mul, aten.pow, aten.mean, aten.add]
# Source node to ATen node mapping:
# add_1 => add_1
# embedding => mul
# mean => mean
# pow_3 => pow_3
# pow_4 => pow_4
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, %primals_2), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_3, [1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-05), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_1, 0.5), kwargs = {})
triton_poi_fused_add_mean_mul_pow_1 = async_compile.triton('triton_poi_fused_add_mean_mul_pow_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_pow_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_mul_pow_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (1))
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp11 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2))
tmp13 = tl.broadcast_to(tmp12, [XBLOCK])
tmp17 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (3))
tmp19 = tl.broadcast_to(tmp18, [XBLOCK])
tmp3 = tmp0 * tmp2
tmp4 = tmp3 * tmp3
tmp8 = tmp5 * tmp7
tmp9 = tmp8 * tmp8
tmp10 = tmp4 + tmp9
tmp14 = tmp11 * tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp10 + tmp15
tmp20 = tmp17 * tmp19
tmp21 = tmp20 * tmp20
tmp22 = tmp16 + tmp21
tmp23 = 4.0
tmp24 = tmp22 / tmp23
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tl.store(out_ptr0 + (x0), tmp27, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ni/cnixk7pwsdlgsjxwsmpbrzopu3rzfg26mvtp5cemfxo35lnktysi.py
# Topologically Sorted Source Nodes: [embedding, pow_3, mean, add_1, pow_4, norm, mul_1, add_2, tanh, gate], Original ATen: [aten.mul, aten.pow, aten.mean, aten.add, aten.div, aten.tanh]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# embedding => mul
# gate => add_3
# mean => mean
# mul_1 => mul_1
# norm => div
# pow_3 => pow_3
# pow_4 => pow_4
# tanh => tanh
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, %primals_2), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_3, [1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-05), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_3, %pow_4), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %div), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_4), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1.0), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_tanh_2 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_tanh_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 / tmp4
tmp6 = tmp2 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = libdevice.tanh(tmp8)
tmp10 = 1.0
tmp11 = tmp9 + tmp10
tl.store(out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vz/cvzd7uxhjcuuvhocpajasfl7npgg3w4tjtzribzneczbe5yv6xr6.py
# Topologically Sorted Source Nodes: [embedding, pow_3, mean, add_1, pow_4, norm, mul_1, add_2, tanh, gate, x], Original ATen: [aten.mul, aten.pow, aten.mean, aten.add, aten.div, aten.tanh]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# embedding => mul
# gate => add_3
# mean => mean
# mul_1 => mul_1
# norm => div
# pow_3 => pow_3
# pow_4 => pow_4
# tanh => tanh
# x => mul_2
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, %primals_2), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_3, [1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-05), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_3, %pow_4), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %div), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_4), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1.0), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %add_3), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_tanh_3 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_tanh_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_tanh_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_tanh_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/u4/cu4g5g7yw4hmus54eigq2n2o2g7qmofps57iinixiiyqxgzkpd6y.py
# Topologically Sorted Source Nodes: [x_2, relu], Original ATen: [aten.native_group_norm, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# x_2 => add_4, add_5, mul_4, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %unsqueeze_2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_5,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_per_fused_native_group_norm_relu_threshold_backward_4 = async_compile.triton('triton_per_fused_native_group_norm_relu_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 512],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_relu_threshold_backward_4', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_relu_threshold_backward_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel):
xnumel = 4
XBLOCK: tl.constexpr = 1
rnumel = 324
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
r3 = (rindex // 81)
tmp0 = tl.load(in_ptr0 + (r1 + (324*x0)), rmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (r3), rmask, eviction_policy='evict_last', other=0.0)
tmp26 = tl.load(in_ptr2 + (r3), rmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.where(rmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp8 = tl.full([1], 324, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = tl.where(rmask, tmp13, 0)
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp17 = tmp0 - tmp10
tmp18 = 324.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tmp30 = 0.0
tmp31 = tmp29 <= tmp30
tl.store(out_ptr2 + (r1 + (324*x0)), tmp29, rmask)
tl.store(out_ptr3 + (r1 + (324*x0)), tmp31, rmask)
tl.store(out_ptr4 + (x0), tmp22, None)
tl.store(out_ptr0 + (x0), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [pow_1, sum_1, add, pow_2], Original ATen: [aten.pow, aten.sum, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused_add_pow_sum_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [embedding, pow_3, mean, add_1, pow_4], Original ATen: [aten.mul, aten.pow, aten.mean, aten.add]
triton_poi_fused_add_mean_mul_pow_1.run(buf1, primals_2, buf2, 4, grid=grid(4), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [embedding, pow_3, mean, add_1, pow_4, norm, mul_1, add_2, tanh, gate], Original ATen: [aten.mul, aten.pow, aten.mean, aten.add, aten.div, aten.tanh]
triton_poi_fused_add_div_mean_mul_pow_tanh_2.run(buf1, primals_2, primals_3, buf2, primals_4, buf3, 16, grid=grid(16), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [embedding, pow_3, mean, add_1, pow_4, norm, mul_1, add_2, tanh, gate, x], Original ATen: [aten.mul, aten.pow, aten.mean, aten.add, aten.div, aten.tanh]
triton_poi_fused_add_div_mean_mul_pow_tanh_3.run(primals_1, buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf3
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_5, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 9, 9), (324, 81, 9, 1))
buf6 = buf2; del buf2 # reuse
buf10 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32)
buf11 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.bool)
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [x_2, relu], Original ATen: [aten.native_group_norm, aten.relu, aten.threshold_backward]
triton_per_fused_native_group_norm_relu_threshold_backward_4.run(buf5, primals_6, primals_7, buf6, buf10, buf11, buf9, 4, 324, grid=grid(4), stream=stream0)
del primals_7
return (buf10, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, buf1, buf4, buf5, reinterpret_tensor(buf6, (4, 1), (1, 1), 0), reinterpret_tensor(buf9, (4, 1), (1, 1), 0), buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class GCT(nn.Module):
def __init__(self, num_channels, epsilon=1e-05, mode='l2', after_relu=False
):
super(GCT, self).__init__()
self.alpha = nn.Parameter(torch.ones(1, num_channels, 1, 1))
self.gamma = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
self.beta = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
self.epsilon = epsilon
self.mode = mode
self.after_relu = after_relu
def forward(self, x):
if self.mode == 'l2':
embedding = (x.pow(2).sum((2, 3), keepdim=True) + self.epsilon
).pow(0.5) * self.alpha
norm = self.gamma / (embedding.pow(2).mean(dim=1, keepdim=True) +
self.epsilon).pow(0.5)
elif self.mode == 'l1':
if not self.after_relu:
_x = torch.abs(x)
else:
_x = x
embedding = _x.sum((2, 3), keepdim=True) * self.alpha
norm = self.gamma / (torch.abs(embedding).mean(dim=1, keepdim=
True) + self.epsilon)
        else:
            # unsupported mode; bail out
            exit()
gate = 1.0 + torch.tanh(embedding * norm + self.beta)
return x * gate
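# A minimal sketch (hypothetical helper, not part of the original module)
# of the l2 gating above: gamma and beta start at zero, so the gate is
# 1 + tanh(0) = 1 everywhere and GCT is the identity at initialization.
def _gct_identity_sketch():
    _gct = GCT(num_channels=4)
    _inp = torch.rand(2, 4, 8, 8)
    assert torch.allclose(_gct(_inp), _inp)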
class _ASPPModule(nn.Module):
def __init__(self, inplanes, planes, kernel_size, padding, dilation):
super(_ASPPModule, self).__init__()
self.GCT = GCT(inplanes)
self.atrous_conv = nn.Conv2d(inplanes, planes, kernel_size=
kernel_size, stride=1, padding=padding, dilation=dilation, bias
=False)
self.bn = nn.GroupNorm(int(planes / 4), planes)
self.relu = nn.ReLU(inplace=True)
self._init_weight()
def forward(self, x):
x = self.GCT(x)
x = self.atrous_conv(x)
x = self.bn(x)
return self.relu(x)
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4, 'kernel_size': 4, 'padding': 4,
'dilation': 1}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_pow_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.sqrt(tmp7)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_mean_mul_pow_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + 1)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + 2)
tmp13 = tl.broadcast_to(tmp12, [XBLOCK])
tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + 3)
tmp19 = tl.broadcast_to(tmp18, [XBLOCK])
tmp3 = tmp0 * tmp2
tmp4 = tmp3 * tmp3
tmp8 = tmp5 * tmp7
tmp9 = tmp8 * tmp8
tmp10 = tmp4 + tmp9
tmp14 = tmp11 * tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp10 + tmp15
tmp20 = tmp17 * tmp19
tmp21 = tmp20 * tmp20
tmp22 = tmp16 + tmp21
tmp23 = 4.0
tmp24 = tmp22 / tmp23
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tl.store(out_ptr0 + x0, tmp27, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_tanh_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 / tmp4
tmp6 = tmp2 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = libdevice.tanh(tmp8)
tmp10 = 1.0
tmp11 = tmp9 + tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_tanh_3(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_per_fused_native_group_norm_relu_threshold_backward_4(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
rnumel = 324
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
r3 = rindex // 81
tmp0 = tl.load(in_ptr0 + (r1 + 324 * x0), rmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r3, rmask, eviction_policy='evict_last',
other=0.0)
tmp26 = tl.load(in_ptr2 + r3, rmask, eviction_policy='evict_last',
other=0.0)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tl.where(rmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp8 = tl.full([1], 324, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = tl.where(rmask, tmp13, 0)
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp17 = tmp0 - tmp10
tmp18 = 324.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tmp30 = 0.0
tmp31 = tmp29 <= tmp30
tl.store(out_ptr2 + (r1 + 324 * x0), tmp29, rmask)
tl.store(out_ptr3 + (r1 + 324 * x0), tmp31, rmask)
tl.store(out_ptr4 + x0, tmp22, None)
tl.store(out_ptr0 + x0, tmp10, None)
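# A minimal eager sketch (hypothetical helper) of what the fused kernel
# above computes: GroupNorm with a single group over all 4 channels
# (eps=1e-05) followed by ReLU; the boolean tensor it also writes marks
# the zeroed positions for the ReLU backward (threshold_backward).
def _eager_group_norm_relu_reference(x, weight, bias):
    import torch.nn.functional as F
    return F.relu(F.group_norm(x, 1, weight, bias, eps=1e-05))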
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_add_pow_sum_0[grid(16)](buf1, primals_1, 16, 16,
XBLOCK=8, num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_poi_fused_add_mean_mul_pow_1[grid(4)](buf1, primals_2, buf2,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_tanh_2[grid(16)](buf1,
primals_2, primals_3, buf2, primals_4, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_tanh_3[grid(256)](primals_1,
buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
buf5 = extern_kernels.convolution(buf4, primals_5, stride=(1, 1),
padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 9, 9), (324, 81, 9, 1))
buf6 = buf2
del buf2
buf10 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32
)
buf11 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.bool)
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_per_fused_native_group_norm_relu_threshold_backward_4[grid(4)](
buf5, primals_6, primals_7, buf6, buf10, buf11, buf9, 4, 324,
num_warps=4, num_stages=1)
del primals_7
return (buf10, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, buf1, buf4, buf5, reinterpret_tensor(buf6, (4, 1), (1, 1
), 0), reinterpret_tensor(buf9, (4, 1), (1, 1), 0), buf11)
class GCT(nn.Module):
def __init__(self, num_channels, epsilon=1e-05, mode='l2', after_relu=False
):
super(GCT, self).__init__()
self.alpha = nn.Parameter(torch.ones(1, num_channels, 1, 1))
self.gamma = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
self.beta = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
self.epsilon = epsilon
self.mode = mode
self.after_relu = after_relu
def forward(self, x):
if self.mode == 'l2':
embedding = (x.pow(2).sum((2, 3), keepdim=True) + self.epsilon
).pow(0.5) * self.alpha
norm = self.gamma / (embedding.pow(2).mean(dim=1, keepdim=True) +
self.epsilon).pow(0.5)
elif self.mode == 'l1':
if not self.after_relu:
_x = torch.abs(x)
else:
_x = x
embedding = _x.sum((2, 3), keepdim=True) * self.alpha
norm = self.gamma / (torch.abs(embedding).mean(dim=1, keepdim=
True) + self.epsilon)
        else:
            # unsupported mode; bail out
            exit()
gate = 1.0 + torch.tanh(embedding * norm + self.beta)
return x * gate
class _ASPPModuleNew(nn.Module):
def __init__(self, inplanes, planes, kernel_size, padding, dilation):
super(_ASPPModuleNew, self).__init__()
self.GCT = GCT(inplanes)
self.atrous_conv = nn.Conv2d(inplanes, planes, kernel_size=
kernel_size, stride=1, padding=padding, dilation=dilation, bias
=False)
self.bn = nn.GroupNorm(int(planes / 4), planes)
self.relu = nn.ReLU(inplace=True)
self._init_weight()
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, input_0):
primals_2 = self.GCT.alpha
primals_3 = self.GCT.gamma
primals_4 = self.GCT.beta
primals_1 = self.atrous_conv.weight
primals_6 = self.bn.weight
primals_7 = self.bn.bias
primals_5 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
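# A minimal smoke test (hypothetical helper, assumes a CUDA device): a 4x4
# kernel with padding 4 on a 4x4 input yields 4 + 2*4 - 4 + 1 = 9, so the
# output of the compiled module is (4, 4, 9, 9).
def _aspp_smoke_test():
    _m = _ASPPModuleNew(inplanes=4, planes=4, kernel_size=4, padding=4,
        dilation=1).cuda()
    _y = _m(torch.rand(4, 4, 4, 4, device='cuda'))
    assert _y.shape == (4, 4, 9, 9)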
| yoxu515/CFBI | _ASPPModule | false | 16,847 | ["BSD-3-Clause"] | 312 | 0bab1e3c9fc3e3ba0629f716d60221e8f8d9d586 | https://github.com/yoxu515/CFBI/tree/0bab1e3c9fc3e3ba0629f716d60221e8f8d9d586 |
Gaussian | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/x6/cx6jmnk3w4fyizcgdntrll7zx32lso2oe3pzrnaggvqp5atfgroz.py
# Topologically Sorted Source Nodes: [neg, mul, exp], Original ATen: [aten.neg, aten.mul, aten.exp]
# Source node to ATen node mapping:
# exp => exp
# mul => mul
# neg => neg
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %arg0_1), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
triton_poi_fused_exp_mul_neg_0 = async_compile.triton('triton_poi_fused_exp_mul_neg_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_mul_neg_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_mul_neg_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = -tmp0
tmp2 = tmp1 * tmp0
tmp3 = tl_math.exp(tmp2)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [neg, mul, exp], Original ATen: [aten.neg, aten.mul, aten.exp]
stream0 = get_raw_stream(0)
triton_poi_fused_exp_mul_neg_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
import torch.utils.tensorboard
import torch.utils.data
class Gaussian(torch.nn.Module):
"""Gaussian activation"""
def forward(self, x: 'Tensor') ->Tensor:
return torch.exp(-x * x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.tensorboard
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_exp_mul_neg_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -tmp0
tmp2 = tmp1 * tmp0
tmp3 = tl_math.exp(tmp2)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_mul_neg_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GaussianNew(torch.nn.Module):
"""Gaussian activation"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
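# A minimal equivalence check (hypothetical helper, assumes a CUDA device)
# that the compiled module matches the eager definition exp(-x * x).
def _gaussian_equivalence_check():
    _x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(GaussianNew()(_x), torch.exp(-_x * _x))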
| yangyinuo823/torchani | Gaussian | false | 16,848 | ["MIT"] | 305 | b0cd62eda59829d197b3c37f2215ba1af64f1c8d | https://github.com/yangyinuo823/torchani/tree/b0cd62eda59829d197b3c37f2215ba1af64f1c8d |
waspIntrinsicComposer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/w5/cw5bynqglxcdm3u2dwrhheo6qsa3bfm4lrb2wjhcdtmuwh5ht5bh.py
# Topologically Sorted Source Nodes: [repeat, mul], Original ATen: [aten.repeat, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# repeat => repeat
# Graph fragment:
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%arg0_1, [1, 4, 1, 1]), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%repeat, %arg1_1), kwargs = {})
triton_poi_fused_mul_repeat_0 = async_compile.triton('triton_poi_fused_mul_repeat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_repeat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_repeat_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16) % 64
x2 = (xindex // 1024)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*(x1 % 16)) + (256*x2)), None)
tmp1 = tl.load(in_ptr1 + (x3), None)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp0, None)
tl.store(out_ptr1 + (x3), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(arg1_1, (4, 64, 4, 4), (1024, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [repeat, mul], Original ATen: [aten.repeat, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_repeat_0.run(arg0_1, arg1_1, buf0, buf1, 4096, grid=grid(4096), stream=stream0)
del arg0_1
del arg1_1
return (buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 16, 4, 4), (256, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
class waspIntrinsicComposer(nn.Module):
def __init__(self, opt):
super(waspIntrinsicComposer, self).__init__()
self.ngpu = opt.ngpu
self.nc = opt.nc
def forward(self, shading, albedo):
self.shading = shading.repeat(1, self.nc, 1, 1)
self.img = torch.mul(self.shading, albedo)
return self.img
def get_inputs():
return [torch.rand([4, 16, 4, 4]), torch.rand([4, 64, 4, 4])]
def get_init_inputs():
return [[], {'opt': _mock_config(ngpu=False, nc=4)}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_repeat_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16 % 64
x2 = xindex // 1024
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * (x1 % 16) + 256 * x2), None)
tmp1 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp0, None)
tl.store(out_ptr1 + x3, tmp2, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(arg1_1, (4, 64, 4, 4), (1024, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.
float32)
buf1 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_mul_repeat_0[grid(4096)](arg0_1, arg1_1, buf0,
buf1, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf1, buf0
class waspIntrinsicComposerNew(nn.Module):
def __init__(self, opt):
super(waspIntrinsicComposerNew, self).__init__()
self.ngpu = opt.ngpu
self.nc = opt.nc
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
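# A minimal equivalence check (hypothetical helper, assumes a CUDA device)
# against the eager repeat-then-multiply path; the SimpleNamespace stub
# stands in for the _mock_config opt used by get_init_inputs above.
def _composer_equivalence_check():
    import types
    _opt = types.SimpleNamespace(ngpu=False, nc=4)
    _shading = torch.rand(4, 16, 4, 4, device='cuda')
    _albedo = torch.rand(4, 64, 4, 4, device='cuda')
    _fused = waspIntrinsicComposerNew(_opt)(_shading, _albedo)
    assert torch.allclose(_fused, _shading.repeat(1, 4, 1, 1) * _albedo)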
| zhixinshu/DeformingAutoencoders-pytorch | waspIntrinsicComposer | false | 16,849 | ["BSD-2-Clause"] | 112 | 72996c5d11ae25dd0051bb51df353fef88e65742 | https://github.com/zhixinshu/DeformingAutoencoders-pytorch/tree/72996c5d11ae25dd0051bb51df353fef88e65742 |
VGG16 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/f7/cf7tayhctr3m6ezk7xezotpdlc5h4drokdkbz4vy2pfkbdxnmn4q.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
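# A minimal eager sketch (hypothetical helper) of the layout change kernel 0
# above performs: a (64, 3, 3, 3) OIHW conv weight is reordered into
# channels-last memory format for the extern convolution; kernels 1-8 below
# repeat the same permutation for the input and progressively larger weights.
def _eager_weight_layout_reference(w):
    return w.contiguous(memory_format=torch.channels_last)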
# kernel path: runs/run_shard_0/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/xq/cxq75w43anllid5ys7ss3yyizuoeph3vvaqlvm5lo434hrywtyle.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4096
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/nw/cnwm6ljuusoqjcwr2jdx6p2ue7ldghxjdr3oe62stiuqhsboiczy.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/32/c32xiwptfqtyhbnde262mvq5tzywzo6zquurttkv7sztqnze6yni.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16384
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/jj/cjjz4tpbucpuc3faa2ky32crfwhb5fbnssd6o2yfkgdcjg2acfmo.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 32768
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tg/ctgdsxjd3rciejxtjvi3y2w5fmmggh5lm3mivuygvkdzeb3zulmc.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_6 = async_compile.triton('triton_poi_fused_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 65536
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/e7/ce7jqsdrj5poslb2hpufqd2wdux5xiab5n2auqal3ztzvkzrmnzl.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_7 = async_compile.triton('triton_poi_fused_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 131072
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ks/ckso6iiq5yfqfxmx7ilr6ufrmz6mlkiy75pexzhyf3ierq4pu3zl.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_8 = async_compile.triton('triton_poi_fused_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 262144
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rv/crv3uzu52jbc4u62gio2klk6cj5xhjt7yazr75tq67kvtteddsn5.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
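# A minimal eager sketch (hypothetical helper) of the pattern this kernel
# finishes: the 3x3 convolution itself runs as an extern kernel, and the
# pointwise kernel above fuses the bias add with the ReLU in place.
def _eager_conv_relu_reference(x, weight, bias):
    import torch.nn.functional as F
    return F.relu(F.conv2d(x, weight, bias, padding=1))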
# kernel path: runs/run_shard_0/inductor_cache/pv/cpvsl3evhqx3iforbcv3pmtgbfqy4uy2wcdoyzofbr276vu6o5tt.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_10 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 2048) % 32
x1 = (xindex // 64) % 32
x0 = xindex % 64
x5 = (xindex // 2048)
x6 = xindex
tmp0 = (-1) + (2*x2)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x1)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-4160) + x0 + (128*x1) + (8192*x5)), tmp10, other=float("-inf"))
tmp12 = 2*x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-4096) + x0 + (128*x1) + (8192*x5)), tmp16, other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x1)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-4032) + x0 + (128*x1) + (8192*x5)), tmp23, other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2*x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-64) + x0 + (128*x1) + (8192*x5)), tmp30, other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x0 + (128*x1) + (8192*x5)), tmp33, other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (8192*x5)), tmp36, other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + (2*x2)
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (4032 + x0 + (128*x1) + (8192*x5)), tmp43, other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4096 + x0 + (128*x1) + (8192*x5)), tmp46, other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (4160 + x0 + (128*x1) + (8192*x5)), tmp49, other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + (x6), tmp51, None)
tl.store(out_ptr1 + (x6), tmp76, None)
''', device_str='cuda')
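# A minimal eager sketch (hypothetical helper, assumes NCHW input) of the
# pooling above: a 3x3 window with stride 2 and padding 1. The compiled
# kernel also emits int8 offsets (0..8) locating the max inside each window,
# unlike F.max_pool2d(..., return_indices=True), which returns flat indices
# into the input plane.
def _eager_max_pool_reference(x):
    import torch.nn.functional as F
    return F.max_pool2d(x, kernel_size=3, stride=2, padding=1)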
# kernel path: runs/run_shard_0/inductor_cache/ws/cwssgzseoqxwmttgkoxdmvdzcrtg4ars5flpnsa2at2qixzwygfj.py
# Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_3 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_11 = async_compile.triton('triton_poi_fused_convolution_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
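# The convolutions themselves run through extern_kernels.convolution with
# bias=None; fused kernels like the one above then add the per-channel bias
# and apply ReLU in place. Eager-mode sketch of the same math:
#   x = torch.relu(conv_out + bias.view(1, -1, 1, 1))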
# kernel path: runs/run_shard_0/inductor_cache/um/cum6xchgkedi67ovds4oqwrgb4wb3xx2b4mawk4ifzmwdx74dufw.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_5 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 2048) % 16
x1 = (xindex // 128) % 16
x0 = xindex % 128
x5 = (xindex // 2048)
x6 = xindex
tmp0 = (-1) + (2*x2)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x1)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-4224) + x0 + (256*x1) + (8192*x5)), tmp10, other=float("-inf"))
tmp12 = 2*x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-4096) + x0 + (256*x1) + (8192*x5)), tmp16, other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x1)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-3968) + x0 + (256*x1) + (8192*x5)), tmp23, other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2*x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-128) + x0 + (256*x1) + (8192*x5)), tmp30, other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x0 + (256*x1) + (8192*x5)), tmp33, other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (128 + x0 + (256*x1) + (8192*x5)), tmp36, other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + (2*x2)
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3968 + x0 + (256*x1) + (8192*x5)), tmp43, other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4096 + x0 + (256*x1) + (8192*x5)), tmp46, other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (4224 + x0 + (256*x1) + (8192*x5)), tmp49, other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + (x6), tmp51, None)
tl.store(out_ptr1 + (x6), tmp76, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/c3/cc36sjgk3au3ve2witr7srumjy6npsyym5bconvmq65prldokmso.py
# Topologically Sorted Source Nodes: [conv2d_4, x_6], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# x_6 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_13 = async_compile.triton('triton_poi_fused_convolution_relu_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ai/caisra7xszqvj72a6ymkedwyqg4d4k7aoyungicku4dmb53rdkxa.py
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_9 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_14 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_14(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 2048) % 8
x1 = (xindex // 256) % 8
x0 = xindex % 256
x5 = (xindex // 2048)
x6 = xindex
tmp0 = (-1) + (2*x2)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x1)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-4352) + x0 + (512*x1) + (8192*x5)), tmp10, other=float("-inf"))
tmp12 = 2*x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-4096) + x0 + (512*x1) + (8192*x5)), tmp16, other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x1)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-3840) + x0 + (512*x1) + (8192*x5)), tmp23, other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2*x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-256) + x0 + (512*x1) + (8192*x5)), tmp30, other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x0 + (512*x1) + (8192*x5)), tmp33, other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (8192*x5)), tmp36, other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + (2*x2)
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3840 + x0 + (512*x1) + (8192*x5)), tmp43, other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4096 + x0 + (512*x1) + (8192*x5)), tmp46, other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (4352 + x0 + (512*x1) + (8192*x5)), tmp49, other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + (x6), tmp51, None)
tl.store(out_ptr1 + (x6), tmp76, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/bo/cbory36nvcjc37vmkyigprzjn5qrg2tdk4ivdkunxl3icdtgur5z.py
# Topologically Sorted Source Nodes: [conv2d_7, x_10], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_7 => convolution_7
# x_10 => relu_7
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {})
triton_poi_fused_convolution_relu_15 = async_compile.triton('triton_poi_fused_convolution_relu_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7j/c7jd3cm6j3ovynecovhl5prgqhcik5umc42dyk3cqoyl3ul6ahpm.py
# Topologically Sorted Source Nodes: [conv2d_12, x_15], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_12 => convolution_12
# x_15 => relu_12
# Graph fragment:
# %convolution_12 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_11, %primals_26, %primals_27, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_12 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_12,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_12, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_16 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 64], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 512
y1 = (yindex // 512)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (512*x2) + (32768*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + (64*y3)), tmp4, xmask)
tl.store(out_ptr1 + (y0 + (512*x2) + (32768*y1)), tmp6, xmask)
''', device_str='cuda')
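# Unlike the in-place bias+ReLU kernels, this last one also emits a boolean
# mask (out_ptr1) marking where the ReLU output is <= 0; autograd's
# threshold_backward uses it to zero gradients, roughly
#   grad_input = grad_output.masked_fill(mask, 0.0)
# and it repacks the activation from channels-last back to a contiguous
# (4, 512, 8, 8) tensor for the graph output.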
async_compile.wait(globals())
del async_compile
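# call() consumes the 13 conv weights, their 13 biases, and the (4, 3, 64, 64)
# input as primals_1..primals_27, and returns the forward activation plus the
# intermediates autograd needs for the backward pass.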
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256, ), (1, ))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256, ), (1, ))
assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (512, ), (1, ))
assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_19, (512, ), (1, ))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512, ), (1, ))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512, ), (1, ))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512, ), (1, ))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 192, 9, grid=grid(192, 9), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 4096, 9, grid=grid(4096, 9), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_6, buf3, 8192, 9, grid=grid(8192, 9), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_8, buf4, 16384, 9, grid=grid(16384, 9), stream=stream0)
del primals_8
buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_10, buf5, 32768, 9, grid=grid(32768, 9), stream=stream0)
del primals_10
buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(primals_12, buf6, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_12
buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(primals_14, buf7, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_14
buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_7.run(primals_16, buf8, 131072, 9, grid=grid(131072, 9), stream=stream0)
del primals_16
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_18, buf9, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_18
buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_20, buf10, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_20
buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_22, buf11, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_22
buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_24, buf12, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_24
buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_26, buf13, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_26
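        # buf0..buf13 now hold the conv weights and the network input repacked
        # into channels-last strides (e.g. (27, 1, 9, 3)), the layout the
        # extern convolutions below consume and produce.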
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf15, primals_2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf17, primals_5, 1048576, grid=grid(1048576), stream=stream0)
del primals_5
buf18 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32)
buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_10.run(buf17, buf18, buf19, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf18, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1, 4096, 128))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_11.run(buf21, primals_7, 524288, grid=grid(524288), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf21, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1, 4096, 128))
buf23 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, x_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_11.run(buf23, primals_9, 524288, grid=grid(524288), stream=stream0)
del primals_9
buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32)
buf25 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.int8)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_12.run(buf23, buf24, buf25, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf26 = extern_kernels.convolution(buf24, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf27 = buf26; del buf26 # reuse
# Topologically Sorted Source Nodes: [conv2d_4, x_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_13.run(buf27, primals_11, 262144, grid=grid(262144), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf29 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [conv2d_5, x_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_13.run(buf29, primals_13, 262144, grid=grid(262144), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf30 = extern_kernels.convolution(buf29, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf31 = buf30; del buf30 # reuse
# Topologically Sorted Source Nodes: [conv2d_6, x_8], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_13.run(buf31, primals_15, 262144, grid=grid(262144), stream=stream0)
del primals_15
buf32 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.float32)
buf33 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8)
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_14.run(buf31, buf32, buf33, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf34 = extern_kernels.convolution(buf32, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf35 = buf34; del buf34 # reuse
# Topologically Sorted Source Nodes: [conv2d_7, x_10], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_15.run(buf35, primals_17, 131072, grid=grid(131072), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
buf36 = extern_kernels.convolution(buf35, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf37 = buf36; del buf36 # reuse
# Topologically Sorted Source Nodes: [conv2d_8, x_11], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_15.run(buf37, primals_19, 131072, grid=grid(131072), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf37, buf10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf39 = buf38; del buf38 # reuse
# Topologically Sorted Source Nodes: [conv2d_9, x_12], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_15.run(buf39, primals_21, 131072, grid=grid(131072), stream=stream0)
del primals_21
# Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
buf40 = extern_kernels.convolution(buf39, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf41 = buf40; del buf40 # reuse
# Topologically Sorted Source Nodes: [conv2d_10, x_13], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_15.run(buf41, primals_23, 131072, grid=grid(131072), stream=stream0)
del primals_23
# Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution]
buf42 = extern_kernels.convolution(buf41, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf43 = buf42; del buf42 # reuse
# Topologically Sorted Source Nodes: [conv2d_11, x_14], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_15.run(buf43, primals_25, 131072, grid=grid(131072), stream=stream0)
del primals_25
# Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
buf44 = extern_kernels.convolution(buf43, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf45 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.float32)
buf46 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_12, x_15], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_16.run(buf44, primals_27, buf45, buf46, 2048, 64, grid=grid(2048, 64), stream=stream0)
del buf44
del primals_27
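        # buf45 is the forward output (ReLU of conv5_3, shape (4, 512, 8, 8));
        # the other returned buffers are repacked weights and saved
        # activations the backward pass will need.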
return (buf45, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, buf15, buf17, buf18, buf19, buf21, buf23, buf24, buf25, buf27, buf29, buf31, buf32, buf33, buf35, buf37, buf39, buf41, buf43, buf46, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
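    # Running this file directly benchmarks the compiled forward with random
    # weights and inputs on cuda:0 via Inductor's wrapper_benchmark harness.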
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
from torch.nn import functional as F
class VGG16(nn.Module):
def __init__(self, conv5_dilation=1):
super(VGG16, self).__init__()
self.conv1_1 = nn.Conv2d(3, 64, 3, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
        self.conv5_1 = nn.Conv2d(512, 512, 3, padding=conv5_dilation,
            dilation=conv5_dilation)
        self.conv5_2 = nn.Conv2d(512, 512, 3, padding=conv5_dilation,
            dilation=conv5_dilation)
        self.conv5_3 = nn.Conv2d(512, 512, 3, padding=conv5_dilation,
            dilation=conv5_dilation)
self.not_training = []
self.from_scratch_layers = []
def forward(self, x):
x = F.relu(self.conv1_1(x))
x = F.relu(self.conv1_2(x))
x = self.pool1(x)
x = F.relu(self.conv2_1(x))
x = F.relu(self.conv2_2(x))
x = self.pool2(x)
x = F.relu(self.conv3_1(x))
x = F.relu(self.conv3_2(x))
x = F.relu(self.conv3_3(x))
x = self.pool3(x)
x = F.relu(self.conv4_1(x))
x = F.relu(self.conv4_2(x))
x = F.relu(self.conv4_3(x))
x = F.relu(self.conv5_1(x))
x = F.relu(self.conv5_2(x))
x = F.relu(self.conv5_3(x))
return x
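    # In the compiled graph above, each F.relu(conv(...)) pair lowers to an
    # extern convolution followed by a fused bias+ReLU Triton kernel, and each
    # pool lowers to one max_pool2d_with_indices kernel.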
@property
def out_channel(self):
return 512
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
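# Minimal eager-mode usage sketch (a CPU smoke test is assumed here; the
# compiled graph above requires CUDA). The helper name is hypothetical.
def _vgg16_smoke_test():
    model = VGG16()
    y = model(torch.rand(4, 3, 64, 64))
    # Three stride-2 max pools: 64 -> 32 -> 16 -> 8 spatial resolution.
    assert y.shape == (4, 512, 8, 8)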
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
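# triton_poi_fused_0 through triton_poi_fused_8 above all perform the same
# weight/input repack: a contiguous tensor is rewritten with channels-last
# strides (for example (4608, 9, 3, 1) -> (4608, 1, 1536, 512)). An
# eager-mode sketch of the equivalent transform:
#   w = w.contiguous(memory_format=torch.channels_last)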
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 2048 % 32
x1 = xindex // 64 % 32
x0 = xindex % 64
x5 = xindex // 2048
x6 = xindex
tmp0 = -1 + 2 * x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-4160 + x0 + 128 * x1 + 8192 * x5), tmp10,
other=float('-inf'))
tmp12 = 2 * x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4096 + x0 + 128 * x1 + 8192 * x5), tmp16,
other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-4032 + x0 + 128 * x1 + 8192 * x5), tmp23,
other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 * x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-64 + x0 + 128 * x1 + 8192 * x5), tmp30,
other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x5), tmp33, other=
float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x5), tmp36,
other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + 2 * x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (4032 + x0 + 128 * x1 + 8192 * x5), tmp43,
other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x5), tmp46,
other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x5), tmp49,
other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + x6, tmp51, None)
tl.store(out_ptr1 + x6, tmp76, None)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 2048 % 16
x1 = xindex // 128 % 16
x0 = xindex % 128
x5 = xindex // 2048
x6 = xindex
tmp0 = -1 + 2 * x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-4224 + x0 + 256 * x1 + 8192 * x5), tmp10,
other=float('-inf'))
tmp12 = 2 * x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4096 + x0 + 256 * x1 + 8192 * x5), tmp16,
other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3968 + x0 + 256 * x1 + 8192 * x5), tmp23,
other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 * x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-128 + x0 + 256 * x1 + 8192 * x5), tmp30,
other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x5), tmp33, other=
float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x5), tmp36,
other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + 2 * x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3968 + x0 + 256 * x1 + 8192 * x5), tmp43,
other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x5), tmp46,
other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x5), tmp49,
other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + x6, tmp51, None)
tl.store(out_ptr1 + x6, tmp76, None)
@triton.jit
def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_14(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 2048 % 8
x1 = xindex // 256 % 8
x0 = xindex % 256
x5 = xindex // 2048
x6 = xindex
tmp0 = -1 + 2 * x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-4352 + x0 + 512 * x1 + 8192 * x5), tmp10,
other=float('-inf'))
tmp12 = 2 * x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4096 + x0 + 512 * x1 + 8192 * x5), tmp16,
other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3840 + x0 + 512 * x1 + 8192 * x5), tmp23,
other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 * x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-256 + x0 + 512 * x1 + 8192 * x5), tmp30,
other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x5), tmp33, other=
float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x5), tmp36,
other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + 2 * x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3840 + x0 + 512 * x1 + 8192 * x5), tmp43,
other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x5), tmp46,
other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x5), tmp49,
other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + x6, tmp51, None)
tl.store(out_ptr1 + x6, tmp76, None)
@triton.jit
def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 512
y1 = yindex // 512
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 32768 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 64 * y3), tmp4, xmask)
tl.store(out_ptr1 + (y0 + 512 * x2 + 32768 * y1), tmp6, xmask)
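# Illustrative reference (not part of the generated file): this variant also
# emits the boolean mask (relu(x) <= 0) that aten.threshold_backward consumes
# in the backward pass, while converting the activation from channels-last
# back to contiguous NCHW. Eager sketch:
def _reference_bias_relu_with_mask(conv_out, bias):
    y = (conv_out + bias.view(1, -1, 1, 1)).relu()
    return y.contiguous(), y <= 0.0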
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (512,), (1,))
assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_19, (512,), (1,))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512,), (1,))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512,), (1,))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512,), (1,))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(192, 9)](primals_1, buf0, 192, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_2[grid(4096, 9)](primals_4, buf2, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_4[grid(16384, 9)](primals_8, buf4, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_5[grid(32768, 9)](primals_10, buf5, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_6[grid(65536, 9)](primals_12, buf6, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_6[grid(65536, 9)](primals_14, buf7, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_14
buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_7[grid(131072, 9)](primals_16, buf8, 131072, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_16
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_18, buf9, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_18
buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_20, buf10, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_20
buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_22, buf11, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_22
buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_24, buf12, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_24
buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_26, buf13, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_26
buf14 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_9[grid(1048576)](buf15, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf17 = buf16
del buf16
triton_poi_fused_convolution_relu_9[grid(1048576)](buf17, primals_5,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf18 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
torch.float32)
buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_10[grid(262144)](buf17,
buf18, buf19, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf20 = extern_kernels.convolution(buf18, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1, 4096, 128))
buf21 = buf20
del buf20
triton_poi_fused_convolution_relu_11[grid(524288)](buf21, primals_7,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf22 = extern_kernels.convolution(buf21, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1, 4096, 128))
buf23 = buf22
del buf22
triton_poi_fused_convolution_relu_11[grid(524288)](buf23, primals_9,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128),
torch.float32)
buf25 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_12[grid(131072)](buf23,
buf24, buf25, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf26 = extern_kernels.convolution(buf24, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_13[grid(262144)](buf27,
primals_11, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_11
buf28 = extern_kernels.convolution(buf27, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf29 = buf28
del buf28
triton_poi_fused_convolution_relu_13[grid(262144)](buf29,
primals_13, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_13
buf30 = extern_kernels.convolution(buf29, buf7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf31 = buf30
del buf30
triton_poi_fused_convolution_relu_13[grid(262144)](buf31,
primals_15, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_15
buf32 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256),
torch.float32)
buf33 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_14[grid(65536)](buf31,
buf32, buf33, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf34 = extern_kernels.convolution(buf32, buf8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf35 = buf34
del buf34
triton_poi_fused_convolution_relu_15[grid(131072)](buf35,
primals_17, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf36 = extern_kernels.convolution(buf35, buf9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf37 = buf36
del buf36
triton_poi_fused_convolution_relu_15[grid(131072)](buf37,
primals_19, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_19
buf38 = extern_kernels.convolution(buf37, buf10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf39 = buf38
del buf38
triton_poi_fused_convolution_relu_15[grid(131072)](buf39,
primals_21, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_21
buf40 = extern_kernels.convolution(buf39, buf11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf41 = buf40
del buf40
triton_poi_fused_convolution_relu_15[grid(131072)](buf41,
primals_23, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_23
buf42 = extern_kernels.convolution(buf41, buf12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf43 = buf42
del buf42
triton_poi_fused_convolution_relu_15[grid(131072)](buf43,
primals_25, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_25
buf44 = extern_kernels.convolution(buf43, buf13, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf45 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch
.float32)
buf46 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_16[grid(2048, 64)
](buf44, primals_27, buf45, buf46, 2048, 64, XBLOCK=32, YBLOCK=
32, num_warps=4, num_stages=1)
del buf44
del primals_27
return (buf45, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8,
buf9, buf10, buf11, buf12, buf13, buf15, buf17, buf18, buf19, buf21,
buf23, buf24, buf25, buf27, buf29, buf31, buf32, buf33, buf35,
buf37, buf39, buf41, buf43, buf46)
class VGG16New(nn.Module):
def __init__(self, conv5_dilation=1):
super(VGG16New, self).__init__()
self.conv1_1 = nn.Conv2d(3, 64, 3, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
self.conv5_2 = nn.Conv2d(512, 512, 3, padding=conv5_dilation,
dilation=conv5_dilation)
self.conv5_1 = nn.Conv2d(512, 512, 3, padding=conv5_dilation,
dilation=conv5_dilation)
self.conv5_3 = nn.Conv2d(512, 512, 3, padding=conv5_dilation,
dilation=conv5_dilation)
self.not_training = []
self.from_scratch_layers = []
@property
def out_channel(self):
return 512
def forward(self, input_0):
primals_1 = self.conv1_1.weight
primals_2 = self.conv1_1.bias
primals_4 = self.conv1_2.weight
primals_5 = self.conv1_2.bias
primals_6 = self.conv2_1.weight
primals_7 = self.conv2_1.bias
primals_8 = self.conv2_2.weight
primals_9 = self.conv2_2.bias
primals_10 = self.conv3_1.weight
primals_11 = self.conv3_1.bias
primals_12 = self.conv3_2.weight
primals_13 = self.conv3_2.bias
primals_14 = self.conv3_3.weight
primals_15 = self.conv3_3.bias
primals_16 = self.conv4_1.weight
primals_17 = self.conv4_1.bias
primals_18 = self.conv4_2.weight
primals_19 = self.conv4_2.bias
primals_20 = self.conv4_3.weight
primals_21 = self.conv4_3.bias
primals_22 = self.conv5_2.weight
primals_23 = self.conv5_2.bias
primals_24 = self.conv5_1.weight
primals_25 = self.conv5_1.bias
primals_26 = self.conv5_3.weight
primals_27 = self.conv5_3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27])
return output[0]
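# Illustrative usage sketch (assumes the usual `import torch` at the top of
# this file and a CUDA device, since `call` allocates every intermediate
# buffer with empty_strided_cuda):
def _example_vgg16_forward():
    model = VGG16New().cuda()
    x = torch.rand(4, 3, 64, 64, device='cuda')
    return model(x)  # (4, 512, 8, 8) conv5_3 feature map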
| yaoqi-zd/SGAN | VGG16 | false | 16,850 | ["MIT"] | 48 | 43d8a859b03967e2423a73ef1ba332ee71714ba4 | https://github.com/yaoqi-zd/SGAN/tree/43d8a859b03967e2423a73ef1ba332ee71714ba4 |
BridgeConnection | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/hp/chpdwpegv6lvistek2wqgimtufecqvfp6grp5rpblk5yjicjzqd2.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
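# Illustrative reference (not part of the generated file): the kernel above
# produces the two LayerNorm statistics over the last dim (size 4), matching
# the graph fragment's var_mean with correction=0. Eager sketch:
def _reference_layer_norm_stats(x, eps=1e-05):
    var, mean = torch.var_mean(x, dim=-1, keepdim=True, correction=0)
    return mean, torch.rsqrt(var + eps)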
# kernel path: runs/run_shard_0/inductor_cache/lh/clhh73owbiuj4adasmetdqsot2nlmw2ljupnw2q4yt3du76mikww.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
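# Illustrative reference (not part of the generated file): the second kernel
# applies the normalize-then-affine half of LayerNorm using the statistics
# computed above. Eager sketch:
def _reference_layer_norm_apply(x, mean, rstd, weight, bias):
    return (x - mean) * rstd * weight + bias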
# kernel path: runs/run_shard_0/inductor_cache/gm/cgmflgdlpeeb52xctoa47uvw47ycyf7ahlj5wdscxdatpbwcboco.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf4, primals_5, buf5, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf4, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf5, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.utils import tensorboard as tensorboard
class BridgeConnection(nn.Module):
def __init__(self, in_dim, out_dim, dout_p):
super(BridgeConnection, self).__init__()
self.norm = nn.LayerNorm(in_dim)
self.linear = nn.Linear(in_dim, out_dim)
self.dropout = nn.Dropout(dout_p)
self.activation = nn.ReLU()
def forward(self, x):
x = self.norm(x)
x = self.linear(x)
x = self.dropout(x)
return self.activation(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4, 'dout_p': 0.5}]
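# Illustrative usage sketch (not part of the original module): wiring the
# helpers above together.
def example_forward():
    init_args, init_kwargs = get_init_inputs()
    model = BridgeConnection(*init_args, **init_kwargs)
    return model(*get_inputs())  # shape: (4, 4, 4, 4)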
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.utils import tensorboard as tensorboard
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_3, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(256)](buf4,
primals_5, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf4, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), buf5, primals_4
class BridgeConnectionNew(nn.Module):
def __init__(self, in_dim, out_dim, dout_p):
super(BridgeConnectionNew, self).__init__()
self.norm = nn.LayerNorm(in_dim)
self.linear = nn.Linear(in_dim, out_dim)
self.dropout = nn.Dropout(dout_p)
self.activation = nn.ReLU()
def forward(self, input_0):
primals_1 = self.norm.weight
primals_2 = self.norm.bias
primals_4 = self.linear.weight
primals_5 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| valterlej/CustomBMT | BridgeConnection | false | 16,851 | ["MIT"] | 157 | c9326752d1355c81f845f2caab9c047be76067de | https://github.com/valterlej/CustomBMT/tree/c9326752d1355c81f845f2caab9c047be76067de |
FeatureEmbedder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/da/cdaptt64vfikv5ga5f6eoweykxghpxinzir5hxp27d77vndkhwzf.py
# Topologically Sorted Source Nodes: [wrapped_sqrt, x_1, x_2], Original ATen: [aten.sqrt, aten.mul, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# wrapped_sqrt => full_default
# x_1 => mul
# x_2 => relu
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 2.0), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %full_default), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%mul,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_mul_relu_sqrt_threshold_backward_0 = async_compile.triton('triton_poi_fused_mul_relu_sqrt_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_relu_sqrt_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_relu_sqrt_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
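# Illustrative reference (not part of the generated file): the scalar 2.0 in
# the kernel above is np.sqrt(d_model) constant-folded at trace time, since
# d_model == 4 here. Eager sketch of the fused epilogue:
def _reference_scaled_relu(lin_out, bias, d_model=4):
    return ((lin_out + bias) * math.sqrt(d_model)).relu()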
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [wrapped_sqrt, x_1, x_2], Original ATen: [aten.sqrt, aten.mul, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_relu_sqrt_threshold_backward_0.run(buf1, primals_2, buf2, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
from torch.utils import tensorboard as tensorboard
class FeatureEmbedder(nn.Module):
def __init__(self, d_feat, d_model):
super(FeatureEmbedder, self).__init__()
self.d_model = d_model
self.embedder = nn.Linear(d_feat, d_model)
self.activation = nn.ReLU()
def forward(self, x):
x = self.embedder(x)
x = x * np.sqrt(self.d_model)
x = self.activation(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_feat': 4, 'd_model': 4}]
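# Illustrative usage sketch (not part of the original module):
def example_forward():
    init_args, init_kwargs = get_init_inputs()
    model = FeatureEmbedder(*init_args, **init_kwargs)
    return model(*get_inputs())  # shape: (4, 4, 4, 4)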
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.utils import tensorboard as tensorboard
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_relu_sqrt_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_mul_relu_sqrt_threshold_backward_0[grid(256)](buf1,
primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class FeatureEmbedderNew(nn.Module):
def __init__(self, d_feat, d_model):
super(FeatureEmbedderNew, self).__init__()
self.d_model = d_model
self.embedder = nn.Linear(d_feat, d_model)
self.activation = nn.ReLU()
def forward(self, input_0):
primals_1 = self.embedder.weight
primals_2 = self.embedder.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| valterlej/CustomBMT | FeatureEmbedder | false | 16,852 | ["MIT"] | 157 | c9326752d1355c81f845f2caab9c047be76067de | https://github.com/valterlej/CustomBMT/tree/c9326752d1355c81f845f2caab9c047be76067de |
SpatialCGNL | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/e2/ce2x2moi5fwoee3pvypf3wm2atpt3rbddfs2zehpzgcrrkyqngy2.py
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_16 => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%view_3, %view_7, %view_11, %view_15, %view_19, %view_23, %view_27, %view_31], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 32
x0 = xindex % 4096
x2 = (xindex // 131072)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (16384*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (4096*((-4) + x1)) + (16384*x2)), tmp9, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + (4096*((-8) + x1)) + (16384*x2)), tmp14, other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr3 + (x0 + (4096*((-12) + x1)) + (16384*x2)), tmp19, other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 20, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr4 + (x0 + (4096*((-16) + x1)) + (16384*x2)), tmp24, other=0.0)
tmp26 = tmp0 >= tmp22
tmp27 = tl.full([1], 24, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tmp26 & tmp28
tmp30 = tl.load(in_ptr5 + (x0 + (4096*((-20) + x1)) + (16384*x2)), tmp29, other=0.0)
tmp31 = tmp0 >= tmp27
tmp32 = tl.full([1], 28, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tmp31 & tmp33
tmp35 = tl.load(in_ptr6 + (x0 + (4096*((-24) + x1)) + (16384*x2)), tmp34, other=0.0)
tmp36 = tmp0 >= tmp32
tmp37 = tl.full([1], 32, tl.int64)
tmp38 = tmp0 < tmp37
tmp39 = tl.load(in_ptr7 + (x0 + (4096*((-28) + x1)) + (16384*x2)), tmp36, other=0.0)
tmp40 = tl.where(tmp34, tmp35, tmp39)
tmp41 = tl.where(tmp29, tmp30, tmp40)
tmp42 = tl.where(tmp24, tmp25, tmp41)
tmp43 = tl.where(tmp19, tmp20, tmp42)
tmp44 = tl.where(tmp14, tmp15, tmp43)
tmp45 = tl.where(tmp9, tmp10, tmp44)
tmp46 = tl.where(tmp4, tmp5, tmp45)
tl.store(out_ptr0 + (x3), tmp46, None)
''', device_str='cuda')
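# Illustrative reference (not part of the generated file): the kernel above
# materializes torch.cat of eight (N, 4, 64, 64) per-group outputs along
# dim=1 into one (N, 32, 64, 64) tensor with channels-last strides. Eager
# sketch:
def _reference_cat(chunks):
    return torch.cat(chunks, dim=1)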
# kernel path: runs/run_shard_0/inductor_cache/ie/cie4kqmlmj4llpnptqwxqdy4gpwabaf6m6gyox7up4mkjwhfurjj.py
# Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm => var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_32, [2, 3]), kwargs = {correction: 0, keepdim: True})
triton_red_fused_native_group_norm_1 = async_compile.triton('triton_red_fused_native_group_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 8192],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (8192*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp3, xmask)
tl.store(out_ptr2 + (x0), tmp4, xmask)
''', device_str='cuda')
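# Illustrative reference (not part of the generated file): the reduction above
# accumulates per-group mean and M2 (sum of squared deviations) with Welford's
# one-pass algorithm, which is more numerically stable than separate sum and
# sum-of-squares passes. Scalar sketch over a 1-D tensor:
def _reference_welford(x):
    mean = x.new_zeros(())
    m2 = x.new_zeros(())
    for n, v in enumerate(x.flatten(), start=1):
        delta = v - mean
        mean = mean + delta / n
        m2 = m2 + delta * (v - mean)
    return mean, m2 / x.numel()  # population mean and variance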
# kernel path: runs/run_shard_0/inductor_cache/fm/cfmclpnvyphi533zqlu2km43abzc5pfmnqb72aqcbkflf6ocdr7p.py
# Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_32, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_24, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_per_fused_native_group_norm_2 = async_compile.triton('triton_per_fused_native_group_norm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[32, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 32
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (4*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (4*x0)), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (r1 + (4*x0)), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp15 = tmp12[:, None]
tmp16 = 32768.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(out_ptr2 + (x0), tmp20, xmask)
tl.store(out_ptr0 + (x0), tmp13, xmask)
tl.store(out_ptr1 + (x0), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wk/cwkiwomgpq262gfrrfincfpqg7rcmk2btwzkabdmndljhlbdkjb3.py
# Topologically Sorted Source Nodes: [group_norm, x_18], Original ATen: [aten.native_group_norm, aten.add]
# Source node to ATen node mapping:
# group_norm => add_1, mul_1
# x_18 => add_2
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_33, %unsqueeze_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_1), kwargs = {})
triton_poi_fused_add_native_group_norm_3 = async_compile.triton('triton_poi_fused_add_native_group_norm_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_group_norm_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x4 = (xindex // 4096)
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((x4 // 8)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((x4 // 8)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x1), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x1), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + (x3), None)
tmp2 = tmp0 - tmp1
tmp4 = 32768.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
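# The pointwise kernel above fuses the group-norm normalization, the affine
# transform, and the residual add into one pass:
#   y = (x - mean[g]) * rsqrt(m2[g] / 32768 + 1e-5) * weight[c] + bias[c] + residual
# `x4 // 8` maps the flattened (batch, channel) index to its (batch, group) row,
# since each of the 8 groups covers 8 consecutive channels. In eager PyTorch this
# is roughly `F.group_norm(x, 8, weight, bias, eps=1e-5) + residual` (a hedged
# equivalence, not asserted by the generated code).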
# kernel path: runs/run_shard_0/inductor_cache/ql/cqlvhxu3bqj4jfmcsbv3cwbb62hh3cmli6wtsaxarvc4gjyyxcs4.py
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
# Source node to ATen node mapping:
# Graph fragment:
# %permute_1 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_28, [0, 2, 1]), kwargs = {})
triton_poi_fused_transpose_4 = async_compile.triton('triton_poi_fused_transpose_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_transpose_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_transpose_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16384
x1 = (xindex // 16384)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (114688 + x0 + (131072*x1)), None)
tl.store(out_ptr0 + (x2), tmp0, None)
''', device_str='cuda')
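# Kernels triton_poi_fused_transpose_4 through triton_poi_fused_transpose_11
# below are identical except for the base offset (7*16384 down to 0): each
# copies one 16384-element group slice of a (4, 32, 64, 64) activation
# (per-sample stride 131072) into a fresh contiguous buffer, materializing the
# transposed per-group views that `call` returns at the end.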
# kernel path: runs/run_shard_0/inductor_cache/qs/cqswaajrn7x7wnf3d2w7i637gtpb5ag7xtnbw4rhzzut5drvp73a.py
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
# Source node to ATen node mapping:
# Graph fragment:
# %permute_5 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_24, [0, 2, 1]), kwargs = {})
triton_poi_fused_transpose_5 = async_compile.triton('triton_poi_fused_transpose_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_transpose_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_transpose_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16384
x1 = (xindex // 16384)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (98304 + x0 + (131072*x1)), None)
tl.store(out_ptr0 + (x2), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/mg/cmg5vmdavuulxdhy4noe2jabq76uar6lfijjomvmoukdobypfnng.py
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
# Source node to ATen node mapping:
# Graph fragment:
# %permute_9 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_20, [0, 2, 1]), kwargs = {})
triton_poi_fused_transpose_6 = async_compile.triton('triton_poi_fused_transpose_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_transpose_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_transpose_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16384
x1 = (xindex // 16384)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (81920 + x0 + (131072*x1)), None)
tl.store(out_ptr0 + (x2), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/24/c24wrwseozc2z2npt2bnuuvn34lb5cxfl6f3hn3zszfxqnwsq7zl.py
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
# Source node to ATen node mapping:
# Graph fragment:
# %permute_13 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_16, [0, 2, 1]), kwargs = {})
triton_poi_fused_transpose_7 = async_compile.triton('triton_poi_fused_transpose_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_transpose_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_transpose_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16384
x1 = (xindex // 16384)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (65536 + x0 + (131072*x1)), None)
tl.store(out_ptr0 + (x2), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ef/cefjlicda2mn35gtgpl3dpnftre2b7b57exm7hvcz5dc4pwml2si.py
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
# Source node to ATen node mapping:
# Graph fragment:
# %permute_17 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_12, [0, 2, 1]), kwargs = {})
triton_poi_fused_transpose_8 = async_compile.triton('triton_poi_fused_transpose_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_transpose_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_transpose_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16384
x1 = (xindex // 16384)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (49152 + x0 + (131072*x1)), None)
tl.store(out_ptr0 + (x2), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qa/cqawy7nqngvnt24xruytkdjsnbcnxxwsonckape4medtusyshtz2.py
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
# Source node to ATen node mapping:
# Graph fragment:
# %permute_21 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_8, [0, 2, 1]), kwargs = {})
triton_poi_fused_transpose_9 = async_compile.triton('triton_poi_fused_transpose_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_transpose_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_transpose_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16384
x1 = (xindex // 16384)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (32768 + x0 + (131072*x1)), None)
tl.store(out_ptr0 + (x2), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/mm/cmmy4ld62jkwskckzrthzj2untwnglmazxkzenauarr2knfyh7dr.py
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
# Source node to ATen node mapping:
# Graph fragment:
# %permute_25 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_4, [0, 2, 1]), kwargs = {})
triton_poi_fused_transpose_10 = async_compile.triton('triton_poi_fused_transpose_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_transpose_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_transpose_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16384
x1 = (xindex // 16384)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (16384 + x0 + (131072*x1)), None)
tl.store(out_ptr0 + (x2), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/in/cin4zvwjsji4ohxslxhczct2fkgy4cmz6j6xl744kzg5syi2ih7d.py
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
# Source node to ATen node mapping:
# Graph fragment:
# %permute_29 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view, [0, 2, 1]), kwargs = {})
triton_poi_fused_transpose_11 = async_compile.triton('triton_poi_fused_transpose_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_transpose_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_transpose_11(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16384
x1 = (xindex // 16384)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (131072*x1)), None)
tl.store(out_ptr0 + (x2), tmp0, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_2, (32, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_3, (32, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_4, (32, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_5, (64, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (64, ), (1, ))
assert_size_stride(primals_7, (64, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [t], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
# Topologically Sorted Source Nodes: [p], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 32, 64, 64), (131072, 4096, 64, 1))
# Topologically Sorted Source Nodes: [g], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [att], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072, 0, 1), 0), reinterpret_tensor(buf2, (4, 16384, 1), (131072, 1, 0), 0), out=buf3)
buf4 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.bmm]
extern_kernels.bmm(buf3, reinterpret_tensor(buf0, (4, 1, 16384), (131072, 0, 1), 0), out=buf4)
buf5 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [att_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072, 0, 1), 16384), reinterpret_tensor(buf2, (4, 16384, 1), (131072, 1, 0), 16384), out=buf5)
buf6 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.bmm]
extern_kernels.bmm(buf5, reinterpret_tensor(buf0, (4, 1, 16384), (131072, 0, 1), 16384), out=buf6)
buf7 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [att_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072, 0, 1), 32768), reinterpret_tensor(buf2, (4, 16384, 1), (131072, 1, 0), 32768), out=buf7)
buf8 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.bmm]
extern_kernels.bmm(buf7, reinterpret_tensor(buf0, (4, 1, 16384), (131072, 0, 1), 32768), out=buf8)
buf9 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [att_3], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072, 0, 1), 49152), reinterpret_tensor(buf2, (4, 16384, 1), (131072, 1, 0), 49152), out=buf9)
buf10 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.bmm]
extern_kernels.bmm(buf9, reinterpret_tensor(buf0, (4, 1, 16384), (131072, 0, 1), 49152), out=buf10)
buf11 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [att_4], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072, 0, 1), 65536), reinterpret_tensor(buf2, (4, 16384, 1), (131072, 1, 0), 65536), out=buf11)
buf12 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.bmm]
extern_kernels.bmm(buf11, reinterpret_tensor(buf0, (4, 1, 16384), (131072, 0, 1), 65536), out=buf12)
buf13 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [att_5], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072, 0, 1), 81920), reinterpret_tensor(buf2, (4, 16384, 1), (131072, 1, 0), 81920), out=buf13)
buf14 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.bmm]
extern_kernels.bmm(buf13, reinterpret_tensor(buf0, (4, 1, 16384), (131072, 0, 1), 81920), out=buf14)
buf15 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [att_6], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072, 0, 1), 98304), reinterpret_tensor(buf2, (4, 16384, 1), (131072, 1, 0), 98304), out=buf15)
buf16 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.bmm]
extern_kernels.bmm(buf15, reinterpret_tensor(buf0, (4, 1, 16384), (131072, 0, 1), 98304), out=buf16)
buf17 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [att_7], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072, 0, 1), 114688), reinterpret_tensor(buf2, (4, 16384, 1), (131072, 1, 0), 114688), out=buf17)
buf18 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.bmm]
extern_kernels.bmm(buf17, reinterpret_tensor(buf0, (4, 1, 16384), (131072, 0, 1), 114688), out=buf18)
buf19 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf4, buf6, buf8, buf10, buf12, buf14, buf16, buf18, buf19, 524288, grid=grid(524288), stream=stream0)
# Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf19, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=8, bias=None)
assert_size_stride(buf20, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf21 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1), torch.float32)
buf22 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1), torch.float32)
buf23 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1), torch.float32)
# Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_1.run(buf20, buf21, buf22, buf23, 128, 8192, grid=grid(128), stream=stream0)
buf24 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32)
buf25 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32)
buf27 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32)
# Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_2.run(buf21, buf22, buf23, buf24, buf25, buf27, 32, 4, grid=grid(32), stream=stream0)
del buf21
del buf22
del buf23
buf28 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [group_norm, x_18], Original ATen: [aten.native_group_norm, aten.add]
triton_poi_fused_add_native_group_norm_3.run(buf20, buf24, buf25, primals_6, primals_7, primals_1, buf28, 1048576, grid=grid(1048576), stream=stream0)
del buf25
del primals_7
buf29 = reinterpret_tensor(buf8, (4, 16384, 1), (16384, 1, 16384), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_4.run(buf0, buf29, 65536, grid=grid(65536), stream=stream0)
buf30 = reinterpret_tensor(buf6, (4, 16384, 1), (16384, 1, 16384), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_4.run(buf1, buf30, 65536, grid=grid(65536), stream=stream0)
buf31 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_4.run(buf2, buf31, 65536, grid=grid(65536), stream=stream0)
buf32 = reinterpret_tensor(buf18, (4, 16384, 1), (16384, 1, 16384), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_5.run(buf0, buf32, 65536, grid=grid(65536), stream=stream0)
buf33 = reinterpret_tensor(buf16, (4, 16384, 1), (16384, 1, 16384), 0); del buf16 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_5.run(buf1, buf33, 65536, grid=grid(65536), stream=stream0)
buf34 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_5.run(buf2, buf34, 65536, grid=grid(65536), stream=stream0)
buf35 = reinterpret_tensor(buf12, (4, 16384, 1), (16384, 1, 16384), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_6.run(buf0, buf35, 65536, grid=grid(65536), stream=stream0)
buf36 = reinterpret_tensor(buf10, (4, 16384, 1), (16384, 1, 16384), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_6.run(buf1, buf36, 65536, grid=grid(65536), stream=stream0)
buf37 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_6.run(buf2, buf37, 65536, grid=grid(65536), stream=stream0)
buf38 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_7.run(buf0, buf38, 65536, grid=grid(65536), stream=stream0)
buf39 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_7.run(buf1, buf39, 65536, grid=grid(65536), stream=stream0)
buf40 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_7.run(buf2, buf40, 65536, grid=grid(65536), stream=stream0)
buf41 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_8.run(buf0, buf41, 65536, grid=grid(65536), stream=stream0)
buf42 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_8.run(buf1, buf42, 65536, grid=grid(65536), stream=stream0)
buf43 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_8.run(buf2, buf43, 65536, grid=grid(65536), stream=stream0)
buf44 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_9.run(buf0, buf44, 65536, grid=grid(65536), stream=stream0)
buf45 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_9.run(buf1, buf45, 65536, grid=grid(65536), stream=stream0)
buf46 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_9.run(buf2, buf46, 65536, grid=grid(65536), stream=stream0)
buf47 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_10.run(buf0, buf47, 65536, grid=grid(65536), stream=stream0)
buf48 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_10.run(buf1, buf48, 65536, grid=grid(65536), stream=stream0)
buf49 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_10.run(buf2, buf49, 65536, grid=grid(65536), stream=stream0)
buf50 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_11.run(buf0, buf50, 65536, grid=grid(65536), stream=stream0)
del buf0
buf51 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_11.run(buf1, buf51, 65536, grid=grid(65536), stream=stream0)
del buf1
buf52 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_11.run(buf2, buf52, 65536, grid=grid(65536), stream=stream0)
del buf2
return (buf28, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, buf19, buf20, reinterpret_tensor(buf24, (4, 8), (8, 1), 0), reinterpret_tensor(buf27, (4, 8), (8, 1), 0), buf17, buf29, buf30, buf31, buf15, buf32, buf33, buf34, buf13, buf35, buf36, buf37, buf11, buf38, buf39, buf40, buf9, buf41, buf42, buf43, buf7, buf44, buf45, buf46, buf5, buf47, buf48, buf49, buf3, buf50, buf51, buf52, )
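# `call` returns buf28 (the block output) first; the remaining tensors are the
# inputs, weights, and per-group views that the matching backward graph is
# presumably expected to consume (this listing is the '0_forward' half of an
# AOTAutograd pair).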
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 64, 64, 64), (262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
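# Running this file directly benchmarks the compiled forward with random
# strided inputs (`python <this_file>.py`); `print_performance` reports the
# measured runtime over `times` x `repeat` invocations of `call`.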
| import torch
import torch.nn as nn
class SpatialCGNL(nn.Module):
"""Spatial CGNL block with dot production kernel for image classfication.
"""
def __init__(self, inplanes, planes, use_scale=False, groups=8):
self.use_scale = use_scale
self.groups = groups
super(SpatialCGNL, self).__init__()
        self.t = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        self.p = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        self.g = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        self.z = nn.Conv2d(planes, inplanes, kernel_size=1, stride=1,
                           groups=self.groups, bias=False)
self.gn = nn.GroupNorm(num_groups=self.groups, num_channels=inplanes)
        if self.use_scale:
            pass  # the upstream source emitted a diagnostic about attention scaling here; stripped to a no-op during extraction
        if self.groups:
            pass  # the upstream source emitted a diagnostic about the group count here; stripped to a no-op during extraction
def kernel(self, t, p, g, b, c, h, w):
"""The linear kernel (dot production).
Args:
t: output of conv theata
p: output of conv phi
g: output of conv g
b: batch size
c: channels number
h: height of featuremaps
w: width of featuremaps
"""
t = t.view(b, 1, c * h * w)
p = p.view(b, 1, c * h * w)
g = g.view(b, c * h * w, 1)
att = torch.bmm(p, g)
if self.use_scale:
att = att.div((c * h * w) ** 0.5)
x = torch.bmm(att, t)
x = x.view(b, c, h, w)
return x
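    # Shape walk-through for one grouped call (b=4, _c=4, h=w=64, so
    # c*h*w = 16384, matching the bmm shapes in the generated code):
    #   p: (4, 1, 16384) @ g: (4, 16384, 1) -> att: (4, 1, 1), one scalar per sample
    #   att @ t: (4, 1, 16384) -> view -> (4, 4, 64, 64)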
def forward(self, x):
residual = x
t = self.t(x)
p = self.p(x)
g = self.g(x)
b, c, h, w = t.size()
if self.groups and self.groups > 1:
_c = c // self.groups
ts = torch.split(t, split_size_or_sections=_c, dim=1)
ps = torch.split(p, split_size_or_sections=_c, dim=1)
gs = torch.split(g, split_size_or_sections=_c, dim=1)
_t_sequences = []
for i in range(self.groups):
_x = self.kernel(ts[i], ps[i], gs[i], b, _c, h, w)
_t_sequences.append(_x)
x = torch.cat(_t_sequences, dim=1)
else:
x = self.kernel(t, p, g, b, c, h, w)
x = self.z(x)
x = self.gn(x) + residual
return x
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return [[], {'inplanes': 64, 'planes': 32}]
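# Example usage (illustrative): the block preserves the input shape because `z`
# maps `planes` back to `inplanes` and the group-norm output is added to the
# residual.
#     block = SpatialCGNL(inplanes=64, planes=32)
#     y = block(get_inputs()[0])  # y.shape == torch.Size([4, 64, 64, 64])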
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 32
x0 = xindex % 4096
x2 = xindex // 131072
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 16384 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 4096 * (-4 + x1) + 16384 * x2), tmp9,
other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 4096 * (-8 + x1) + 16384 * x2), tmp14,
other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr3 + (x0 + 4096 * (-12 + x1) + 16384 * x2), tmp19,
other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 20, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr4 + (x0 + 4096 * (-16 + x1) + 16384 * x2), tmp24,
other=0.0)
tmp26 = tmp0 >= tmp22
tmp27 = tl.full([1], 24, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tmp26 & tmp28
tmp30 = tl.load(in_ptr5 + (x0 + 4096 * (-20 + x1) + 16384 * x2), tmp29,
other=0.0)
tmp31 = tmp0 >= tmp27
tmp32 = tl.full([1], 28, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tmp31 & tmp33
tmp35 = tl.load(in_ptr6 + (x0 + 4096 * (-24 + x1) + 16384 * x2), tmp34,
other=0.0)
tmp36 = tmp0 >= tmp32
tl.full([1], 32, tl.int64)
tmp39 = tl.load(in_ptr7 + (x0 + 4096 * (-28 + x1) + 16384 * x2), tmp36,
other=0.0)
tmp40 = tl.where(tmp34, tmp35, tmp39)
tmp41 = tl.where(tmp29, tmp30, tmp40)
tmp42 = tl.where(tmp24, tmp25, tmp41)
tmp43 = tl.where(tmp19, tmp20, tmp42)
tmp44 = tl.where(tmp14, tmp15, tmp43)
tmp45 = tl.where(tmp9, tmp10, tmp44)
tmp46 = tl.where(tmp4, tmp5, tmp45)
tl.store(out_ptr0 + x3, tmp46, None)
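# triton_poi_fused_cat_0 reassembles the eight per-group results along the
# channel axis: x1 (the channel index, 0..31) is bucketed into ranges of 4 by
# the cascaded `tl.where` selects, so in_ptr0..in_ptr7 each supply channels
# [4k, 4k+4) of the concatenated (4, 32, 64, 64) output.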
@triton.jit
def triton_red_fused_native_group_norm_1(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp3, xmask)
tl.store(out_ptr2 + x0, tmp4, xmask)
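# Stage 1 of the group-norm statistics: each of the 128 rows (4 batches * 8
# groups * 4 column splits) streams 8192 elements through a running Welford
# update (`welford_reduce`), leaving partial (mean, M2, count) triples for the
# per-row merge performed by the next kernel.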
@triton.jit
def triton_per_fused_native_group_norm_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 32
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (r1 + 4 * x0), xmask, other=0.0)
tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp3, 0)
tmp8 = tl.where(xmask, tmp4, 0)
tmp9 = tl.where(xmask, tmp5, 0)
tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1)
tmp13 = tmp10[:, None]
tmp14 = tmp11[:, None]
tmp12[:, None]
tmp16 = 32768.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(out_ptr2 + x0, tmp20, xmask)
tl.store(out_ptr0 + x0, tmp13, xmask)
tl.store(out_ptr1 + x0, tmp14, xmask)
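# Note: the bare expressions scattered through this listing (`tl.full(...)`,
# `tmp12[:, None]`) are no-op leftovers where unused assignments were dropped
# during re-extraction; they do not affect the stored results.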
@triton.jit
def triton_poi_fused_add_native_group_norm_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x4 = xindex // 4096
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x4 // 8, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4 // 8, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + x3, None)
tmp2 = tmp0 - tmp1
tmp4 = 32768.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_transpose_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16384
x1 = xindex // 16384
x2 = xindex
tmp0 = tl.load(in_ptr0 + (114688 + x0 + 131072 * x1), None)
tl.store(out_ptr0 + x2, tmp0, None)
@triton.jit
def triton_poi_fused_transpose_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16384
x1 = xindex // 16384
x2 = xindex
tmp0 = tl.load(in_ptr0 + (98304 + x0 + 131072 * x1), None)
tl.store(out_ptr0 + x2, tmp0, None)
@triton.jit
def triton_poi_fused_transpose_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16384
x1 = xindex // 16384
x2 = xindex
tmp0 = tl.load(in_ptr0 + (81920 + x0 + 131072 * x1), None)
tl.store(out_ptr0 + x2, tmp0, None)
@triton.jit
def triton_poi_fused_transpose_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16384
x1 = xindex // 16384
x2 = xindex
tmp0 = tl.load(in_ptr0 + (65536 + x0 + 131072 * x1), None)
tl.store(out_ptr0 + x2, tmp0, None)
@triton.jit
def triton_poi_fused_transpose_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16384
x1 = xindex // 16384
x2 = xindex
tmp0 = tl.load(in_ptr0 + (49152 + x0 + 131072 * x1), None)
tl.store(out_ptr0 + x2, tmp0, None)
@triton.jit
def triton_poi_fused_transpose_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16384
x1 = xindex // 16384
x2 = xindex
tmp0 = tl.load(in_ptr0 + (32768 + x0 + 131072 * x1), None)
tl.store(out_ptr0 + x2, tmp0, None)
@triton.jit
def triton_poi_fused_transpose_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16384
x1 = xindex // 16384
x2 = xindex
tmp0 = tl.load(in_ptr0 + (16384 + x0 + 131072 * x1), None)
tl.store(out_ptr0 + x2, tmp0, None)
@triton.jit
def triton_poi_fused_transpose_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16384
x1 = xindex // 16384
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 131072 * x1), None)
tl.store(out_ptr0 + x2, tmp0, None)
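# Unlike the first listing, the `call` below launches each kernel with the
# explicit `kernel[grid(n)](..., XBLOCK=..., num_warps=..., num_stages=...)`
# syntax rather than the heuristics-driven `.run(...)` wrapper, so the numeric
# launch parameters are spelled out inline.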
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_2, (32, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_3, (32, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_4, (32, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_5, (64, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (64,), (1,))
assert_size_stride(primals_7, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = extern_kernels.convolution(primals_1, primals_3, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072,
0, 1), 0), reinterpret_tensor(buf2, (4, 16384, 1), (131072, 1,
0), 0), out=buf3)
buf4 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf0, (4, 1, 16384), (
131072, 0, 1), 0), out=buf4)
buf5 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072,
0, 1), 16384), reinterpret_tensor(buf2, (4, 16384, 1), (131072,
1, 0), 16384), out=buf5)
buf6 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
float32)
extern_kernels.bmm(buf5, reinterpret_tensor(buf0, (4, 1, 16384), (
131072, 0, 1), 16384), out=buf6)
buf7 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072,
0, 1), 32768), reinterpret_tensor(buf2, (4, 16384, 1), (131072,
1, 0), 32768), out=buf7)
buf8 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
float32)
extern_kernels.bmm(buf7, reinterpret_tensor(buf0, (4, 1, 16384), (
131072, 0, 1), 32768), out=buf8)
buf9 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072,
0, 1), 49152), reinterpret_tensor(buf2, (4, 16384, 1), (131072,
1, 0), 49152), out=buf9)
buf10 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf0, (4, 1, 16384), (
131072, 0, 1), 49152), out=buf10)
buf11 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072,
0, 1), 65536), reinterpret_tensor(buf2, (4, 16384, 1), (131072,
1, 0), 65536), out=buf11)
buf12 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
float32)
extern_kernels.bmm(buf11, reinterpret_tensor(buf0, (4, 1, 16384), (
131072, 0, 1), 65536), out=buf12)
buf13 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072,
0, 1), 81920), reinterpret_tensor(buf2, (4, 16384, 1), (131072,
1, 0), 81920), out=buf13)
buf14 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
float32)
extern_kernels.bmm(buf13, reinterpret_tensor(buf0, (4, 1, 16384), (
131072, 0, 1), 81920), out=buf14)
buf15 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072,
0, 1), 98304), reinterpret_tensor(buf2, (4, 16384, 1), (131072,
1, 0), 98304), out=buf15)
buf16 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
float32)
extern_kernels.bmm(buf15, reinterpret_tensor(buf0, (4, 1, 16384), (
131072, 0, 1), 98304), out=buf16)
buf17 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 16384), (131072,
0, 1), 114688), reinterpret_tensor(buf2, (4, 16384, 1), (131072,
1, 0), 114688), out=buf17)
buf18 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
float32)
extern_kernels.bmm(buf17, reinterpret_tensor(buf0, (4, 1, 16384), (
131072, 0, 1), 114688), out=buf18)
buf19 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(524288)](buf4, buf6, buf8, buf10, buf12,
buf14, buf16, buf18, buf19, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
buf20 = extern_kernels.convolution(buf19, primals_5, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=8, bias=None)
assert_size_stride(buf20, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf21 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1),
torch.float32)
buf22 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1),
torch.float32)
buf23 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1),
torch.float32)
triton_red_fused_native_group_norm_1[grid(128)](buf20, buf21, buf22,
buf23, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf24 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32)
buf25 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32)
buf27 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32)
triton_per_fused_native_group_norm_2[grid(32)](buf21, buf22, buf23,
buf24, buf25, buf27, 32, 4, XBLOCK=32, num_warps=2, num_stages=1)
del buf21
del buf22
del buf23
buf28 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
triton_poi_fused_add_native_group_norm_3[grid(1048576)](buf20,
buf24, buf25, primals_6, primals_7, primals_1, buf28, 1048576,
XBLOCK=1024, num_warps=4, num_stages=1)
del buf25
del primals_7
buf29 = reinterpret_tensor(buf8, (4, 16384, 1), (16384, 1, 16384), 0)
del buf8
triton_poi_fused_transpose_4[grid(65536)](buf0, buf29, 65536,
XBLOCK=256, num_warps=4, num_stages=1)
buf30 = reinterpret_tensor(buf6, (4, 16384, 1), (16384, 1, 16384), 0)
del buf6
triton_poi_fused_transpose_4[grid(65536)](buf1, buf30, 65536,
XBLOCK=256, num_warps=4, num_stages=1)
buf31 = buf4
del buf4
triton_poi_fused_transpose_4[grid(65536)](buf2, buf31, 65536,
XBLOCK=256, num_warps=4, num_stages=1)
buf32 = reinterpret_tensor(buf18, (4, 16384, 1), (16384, 1, 16384), 0)
del buf18
triton_poi_fused_transpose_5[grid(65536)](buf0, buf32, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
buf33 = reinterpret_tensor(buf16, (4, 16384, 1), (16384, 1, 16384), 0)
del buf16
triton_poi_fused_transpose_5[grid(65536)](buf1, buf33, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
buf34 = buf14
del buf14
triton_poi_fused_transpose_5[grid(65536)](buf2, buf34, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
buf35 = reinterpret_tensor(buf12, (4, 16384, 1), (16384, 1, 16384), 0)
del buf12
triton_poi_fused_transpose_6[grid(65536)](buf0, buf35, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
buf36 = reinterpret_tensor(buf10, (4, 16384, 1), (16384, 1, 16384), 0)
del buf10
triton_poi_fused_transpose_6[grid(65536)](buf1, buf36, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
buf37 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
float32)
triton_poi_fused_transpose_6[grid(65536)](buf2, buf37, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
buf38 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.
float32)
triton_poi_fused_transpose_7[grid(65536)](buf0, buf38, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
buf39 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.
float32)
triton_poi_fused_transpose_7[grid(65536)](buf1, buf39, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
buf40 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
float32)
triton_poi_fused_transpose_7[grid(65536)](buf2, buf40, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
buf41 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.
float32)
triton_poi_fused_transpose_8[grid(65536)](buf0, buf41, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
buf42 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.
float32)
triton_poi_fused_transpose_8[grid(65536)](buf1, buf42, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
buf43 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
float32)
triton_poi_fused_transpose_8[grid(65536)](buf2, buf43, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
buf44 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.
float32)
triton_poi_fused_transpose_9[grid(65536)](buf0, buf44, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
buf45 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.
float32)
triton_poi_fused_transpose_9[grid(65536)](buf1, buf45, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
buf46 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
float32)
triton_poi_fused_transpose_9[grid(65536)](buf2, buf46, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
buf47 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.
float32)
triton_poi_fused_transpose_10[grid(65536)](buf0, buf47, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
buf48 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.
float32)
triton_poi_fused_transpose_10[grid(65536)](buf1, buf48, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
buf49 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
float32)
triton_poi_fused_transpose_10[grid(65536)](buf2, buf49, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
buf50 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.
float32)
triton_poi_fused_transpose_11[grid(65536)](buf0, buf50, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
del buf0
buf51 = empty_strided_cuda((4, 16384, 1), (16384, 1, 16384), torch.
float32)
triton_poi_fused_transpose_11[grid(65536)](buf1, buf51, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
del buf1
buf52 = empty_strided_cuda((4, 1, 16384), (16384, 16384, 1), torch.
float32)
triton_poi_fused_transpose_11[grid(65536)](buf2, buf52, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
del buf2
return (buf28, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, buf19, buf20, reinterpret_tensor(buf24, (4, 8), (8, 1),
0), reinterpret_tensor(buf27, (4, 8), (8, 1), 0), buf17, buf29,
buf30, buf31, buf15, buf32, buf33, buf34, buf13, buf35, buf36,
buf37, buf11, buf38, buf39, buf40, buf9, buf41, buf42, buf43, buf7,
buf44, buf45, buf46, buf5, buf47, buf48, buf49, buf3, buf50, buf51,
buf52)
class SpatialCGNLNew(nn.Module):
"""Spatial CGNL block with dot production kernel for image classfication.
"""
def __init__(self, inplanes, planes, use_scale=False, groups=8):
self.use_scale = use_scale
self.groups = groups
super(SpatialCGNLNew, self).__init__()
        self.t = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        self.p = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        self.g = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        self.z = nn.Conv2d(planes, inplanes, kernel_size=1, stride=1,
                           groups=self.groups, bias=False)
self.gn = nn.GroupNorm(num_groups=self.groups, num_channels=inplanes)
        if self.use_scale:
            pass  # the upstream source emitted a diagnostic about attention scaling here; stripped to a no-op during extraction
        if self.groups:
            pass  # the upstream source emitted a diagnostic about the group count here; stripped to a no-op during extraction
def kernel(self, t, p, g, b, c, h, w):
"""The linear kernel (dot production).
Args:
t: output of conv theata
p: output of conv phi
g: output of conv g
b: batch size
c: channels number
h: height of featuremaps
w: width of featuremaps
"""
t = t.view(b, 1, c * h * w)
p = p.view(b, 1, c * h * w)
g = g.view(b, c * h * w, 1)
att = torch.bmm(p, g)
if self.use_scale:
att = att.div((c * h * w) ** 0.5)
x = torch.bmm(att, t)
x = x.view(b, c, h, w)
return x
def forward(self, input_0):
primals_2 = self.t.weight
primals_3 = self.p.weight
primals_4 = self.g.weight
primals_5 = self.z.weight
primals_6 = self.gn.weight
primals_7 = self.gn.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
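    # forward() bypasses the eager kernel()/split/cat path entirely: it packs
    # the input and the six weight/bias tensors as primals, delegates to the
    # compiled `call`, and returns only output[0] (the block output buf28).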
| zj1008/GALD-DGCNet | SpatialCGNL | false | 16,853 | [
"MIT"
] | 127 | be7ebfe2b3d28ea28a2b4714852999d4af2a785e | https://github.com/zj1008/GALD-DGCNet/tree/be7ebfe2b3d28ea28a2b4714852999d4af2a785e |
MultiheadedAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/dk/cdk4odz276xorciau5ehgl7f3s2mgkf3hrye6xep6kzubczdeqqy.py
# Topologically Sorted Source Nodes: [QKt], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# QKt => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/bs/cbsluabtq7ll426nybkislhh3cajm6f7ggrxam362hohynwnvtk6.py
# Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq]
# Source node to ATen node mapping:
# eq => eq
# Graph fragment:
# %eq : [num_users=2] = call_function[target=torch.ops.aten.eq.Scalar](args = (%unsqueeze, 0), kwargs = {})
triton_poi_fused_eq_1 = async_compile.triton('triton_poi_fused_eq_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eq_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rw/crww6wbqpm2jr2qp3qfqi3ubi67be74coj6dsxg4o6nxmu7jeftd.py
# Topologically Sorted Source Nodes: [sm_input_1, softmax], Original ATen: [aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# sm_input_1 => full_default_1, where
# softmax => amax, exp, sub, sum_1
# Graph fragment:
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default_1, %view_11), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_masked_fill_2 = async_compile.triton('triton_poi_fused__softmax_masked_fill_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (4*x3), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp5 = tl.load(in_ptr1 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp9 = tl.load(in_ptr1 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp13 = tl.load(in_ptr1 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp2 = float("-inf")
tmp3 = tl.where(tmp0, tmp2, tmp1)
tmp6 = tl.where(tmp4, tmp2, tmp5)
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp10 = tl.where(tmp8, tmp2, tmp9)
tmp11 = triton_helpers.maximum(tmp7, tmp10)
tmp14 = tl.where(tmp12, tmp2, tmp13)
tmp15 = triton_helpers.maximum(tmp11, tmp14)
tmp16 = tmp3 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp6 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp10 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp14 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tl.store(out_ptr0 + (x3), tmp15, xmask)
tl.store(out_ptr1 + (x3), tmp26, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/up/cup2aw3mw5reuhjseq2bibnvsl4q3xvduh7zvibj7jtcenqa2hi5.py
# Topologically Sorted Source Nodes: [sm_input_1, softmax], Original ATen: [aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# sm_input_1 => full_default_1, where
# softmax => amax, div_1, exp, sub
# Graph fragment:
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default_1, %view_11), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_masked_fill_3 = async_compile.triton('triton_poi_fused__softmax_masked_fill_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 64)
x4 = xindex % 16
x5 = xindex
x6 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x4 + (16*x3)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + (x5), xmask)
tmp4 = tl.load(in_ptr1 + (x6), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (x6), xmask, eviction_policy='evict_last')
tmp2 = float("-inf")
tmp3 = tl.where(tmp0, tmp2, tmp1)
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tl.store(in_out_ptr0 + (x5), tmp8, xmask)
''', device_str='cuda')
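# Together, the two kernels above form a numerically stable masked softmax in
# two passes: triton_poi_fused__softmax_masked_fill_2 scans each row of four
# logits, applies the -inf mask, and writes the row max (out_ptr0) and the sum
# of exp(x - max) (out_ptr1); triton_poi_fused__softmax_masked_fill_3 then
# rewrites each element in place as exp(x - max) / sum. Subtracting the row
# max before exponentiating avoids overflow.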
# kernel path: runs/run_shard_0/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [QKt], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_3, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [QKt], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [QKt], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq]
triton_poi_fused_eq_1.run(primals_10, buf6, 64, grid=grid(64), stream=stream0)
del primals_10
buf7 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [sm_input_1, softmax], Original ATen: [aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_masked_fill_2.run(buf6, buf5, buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [sm_input_1, softmax], Original ATen: [aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_masked_fill_3.run(buf9, buf6, buf7, buf8, 256, grid=grid(256), stream=stream0)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf2, primals_8, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_8
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [Q_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_12, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13)
del primals_12
return (reinterpret_tensor(buf13, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), buf6, buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), primals_11, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
from torch.utils import tensorboard as tensorboard
def attention(Q, K, V, mask, dropout=None):
d_k = Q.size(-1)
QKt = Q.matmul(K.transpose(-1, -2))
sm_input = QKt / np.sqrt(d_k)
if mask is not None:
sm_input = sm_input.masked_fill(mask == 0, -float('inf'))
softmax = F.softmax(sm_input, dim=-1)
out = softmax.matmul(V)
if dropout is not None:
out = dropout(out)
return out
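# In equation form, attention() computes softmax(Q @ K^T / sqrt(d_k)) @ V,
# with masked positions receiving -inf logits and hence probability zero.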
class MultiheadedAttention(nn.Module):
def __init__(self, d_model_Q, d_model_K, d_model_V, H, dout_p=0.0,
d_model=None):
super(MultiheadedAttention, self).__init__()
self.d_model_Q = d_model_Q
self.d_model_K = d_model_K
self.d_model_V = d_model_V
self.H = H
self.d_model = d_model
self.dout_p = dout_p
if self.d_model is None:
            # a warning print was likely stripped here; fall back to d_model_Q
self.d_model = self.d_model_Q
self.d_k = self.d_model // H
self.linear_Q2d = nn.Linear(self.d_model_Q, self.d_model)
self.linear_K2d = nn.Linear(self.d_model_K, self.d_model)
self.linear_V2d = nn.Linear(self.d_model_V, self.d_model)
self.linear_d2Q = nn.Linear(self.d_model, self.d_model_Q)
self.dropout = nn.Dropout(self.dout_p)
assert self.d_model % H == 0
def forward(self, Q, K, V, mask):
"""
Q, K, V: (B, Sq, Dq), (B, Sk, Dk), (B, Sv, Dv)
mask: (B, 1, Sk)
        Sk = Sv (keys and values share a sequence length);
        note that Dk is the input key dim, not necessarily equal to self.d_k.
Also: m1 is the target modality (queries); m2 is the source modality (keys, values)
"""
B, Sq, _d_model_Q = Q.shape
Q = self.linear_Q2d(Q)
K = self.linear_K2d(K)
V = self.linear_V2d(V)
Q = Q.view(B, -1, self.H, self.d_k).transpose(-3, -2)
K = K.view(B, -1, self.H, self.d_k).transpose(-3, -2)
V = V.view(B, -1, self.H, self.d_k).transpose(-3, -2)
if mask is not None:
mask = mask.unsqueeze(1)
Q = attention(Q, K, V, mask, self.dropout)
Q = Q.transpose(-3, -2).contiguous().view(B, Sq, self.d_model)
Q = self.linear_d2Q(Q)
return Q
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model_Q': 4, 'd_model_K': 4, 'd_model_V': 4, 'H': 4}]
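# A minimal smoke test, assuming the usual harness contract that
# get_init_inputs() yields (args, kwargs) for the constructor and
# get_inputs() yields the forward arguments:
#
#   args, kwargs = get_init_inputs()
#   m = MultiheadedAttention(*args, **kwargs)
#   out = m(*get_inputs())  # expected output shape: (4, 4, 4)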
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
from torch.utils import tensorboard as tensorboard
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_eq_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last').to(tl.int1)
tmp5 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last').to(tl.int1)
tmp9 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last').to(tl.int1)
tmp13 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp2 = float('-inf')
tmp3 = tl.where(tmp0, tmp2, tmp1)
tmp6 = tl.where(tmp4, tmp2, tmp5)
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp10 = tl.where(tmp8, tmp2, tmp9)
tmp11 = triton_helpers.maximum(tmp7, tmp10)
tmp14 = tl.where(tmp12, tmp2, tmp13)
tmp15 = triton_helpers.maximum(tmp11, tmp14)
tmp16 = tmp3 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp6 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp10 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp14 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tl.store(out_ptr0 + x3, tmp15, xmask)
tl.store(out_ptr1 + x3, tmp26, xmask)
@triton.jit
def triton_poi_fused__softmax_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 64
x4 = xindex % 16
x5 = xindex
x6 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + x5, xmask)
tmp4 = tl.load(in_ptr1 + x6, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last')
tmp2 = float('-inf')
tmp3 = tl.where(tmp0, tmp2, tmp1)
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tl.store(in_out_ptr0 + x5, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
triton_poi_fused_eq_1[grid(64)](primals_10, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_10
buf7 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_masked_fill_2[grid(64)](buf6, buf5, buf7,
buf8, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_masked_fill_3[grid(256)](buf9, buf6, buf7,
buf8, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf8
triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_8, buf10, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_8
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_12, reinterpret_tensor(buf12, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_12
return reinterpret_tensor(buf13, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0
), buf6, buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0
), primals_11, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
def attention(Q, K, V, mask, dropout=None):
d_k = Q.size(-1)
QKt = Q.matmul(K.transpose(-1, -2))
sm_input = QKt / np.sqrt(d_k)
if mask is not None:
sm_input = sm_input.masked_fill(mask == 0, -float('inf'))
softmax = F.softmax(sm_input, dim=-1)
out = softmax.matmul(V)
if dropout is not None:
out = dropout(out)
return out
class MultiheadedAttentionNew(nn.Module):
def __init__(self, d_model_Q, d_model_K, d_model_V, H, dout_p=0.0,
d_model=None):
super(MultiheadedAttentionNew, self).__init__()
self.d_model_Q = d_model_Q
self.d_model_K = d_model_K
self.d_model_V = d_model_V
self.H = H
self.d_model = d_model
self.dout_p = dout_p
if self.d_model is None:
            # a warning print was likely stripped here; fall back to d_model_Q
self.d_model = self.d_model_Q
self.d_k = self.d_model // H
self.linear_Q2d = nn.Linear(self.d_model_Q, self.d_model)
self.linear_K2d = nn.Linear(self.d_model_K, self.d_model)
self.linear_V2d = nn.Linear(self.d_model_V, self.d_model)
self.linear_d2Q = nn.Linear(self.d_model, self.d_model_Q)
self.dropout = nn.Dropout(self.dout_p)
assert self.d_model % H == 0
def forward(self, input_0, input_1, input_2, input_3):
primals_2 = self.linear_Q2d.weight
primals_3 = self.linear_Q2d.bias
primals_4 = self.linear_K2d.weight
primals_5 = self.linear_K2d.bias
primals_7 = self.linear_V2d.weight
primals_8 = self.linear_V2d.bias
primals_11 = self.linear_d2Q.weight
primals_12 = self.linear_d2Q.bias
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
primals_10 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
| valterlej/CustomBMT | MultiheadedAttention | false | 16,854 | [
"MIT"
] | 157 | c9326752d1355c81f845f2caab9c047be76067de | https://github.com/valterlej/CustomBMT/tree/c9326752d1355c81f845f2caab9c047be76067de |
SinkhornDistance | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/wn/cwnh6wayuqfrqlhl7oz3ni2p3a66r6ulciyusnckjvehorssxeqq.py
# Topologically Sorted Source Nodes: [logsumexp], Original ATen: [aten.logsumexp]
# Source node to ATen node mapping:
# logsumexp => abs_1, amax, eq, full_default, sub, where
# Graph fragment:
# %amax : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%permute, [-1], True), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax,), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_1, inf), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %amax), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %where), kwargs = {})
triton_poi_fused_logsumexp_0 = async_compile.triton('triton_poi_fused_logsumexp_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_logsumexp_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_logsumexp_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x0 = xindex % 4
x2 = (xindex // 16) % 16
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = -tmp0
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tmp3 + tmp2
tmp5 = 1000.0
tmp6 = tmp4 * tmp5
tmp8 = -tmp7
tmp9 = tmp8 + tmp2
tmp10 = tmp9 + tmp2
tmp11 = tmp10 * tmp5
tmp13 = -tmp12
tmp14 = tmp13 + tmp2
tmp15 = tmp14 + tmp2
tmp16 = tmp15 * tmp5
tmp17 = triton_helpers.maximum(tmp11, tmp16)
tmp19 = -tmp18
tmp20 = tmp19 + tmp2
tmp21 = tmp20 + tmp2
tmp22 = tmp21 * tmp5
tmp23 = triton_helpers.maximum(tmp17, tmp22)
tmp25 = -tmp24
tmp26 = tmp25 + tmp2
tmp27 = tmp26 + tmp2
tmp28 = tmp27 * tmp5
tmp29 = triton_helpers.maximum(tmp23, tmp28)
tmp30 = tl_math.abs(tmp29)
tmp31 = float("inf")
tmp32 = tmp30 == tmp31
tmp33 = 0.0
tmp34 = tl.where(tmp32, tmp33, tmp29)
tmp35 = tmp6 - tmp34
tl.store(out_ptr0 + (x5), tmp35, xmask)
''', device_str='cuda')
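# This kernel performs the shift half of a stable logsumexp over the last
# dimension: it recomputes the row max from the four strided loads (after the
# fused (2 - cost) * 1000.0 score transform), replaces an infinite max with 0
# so that all -inf rows do not turn into NaNs, and stores score - max. The
# companion kernel below exponentiates, sums, takes the log, and adds the max
# back.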
# kernel path: runs/run_shard_0/inductor_cache/k6/ck6ny2i4ti5xdxsrlzqwk3o4ajv4a623zyfnerybeogf4hrbgceq.py
# Topologically Sorted Source Nodes: [logsumexp], Original ATen: [aten.logsumexp]
# Source node to ATen node mapping:
# logsumexp => add_3, exp, log_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1]), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log_1, %squeeze), kwargs = {})
triton_poi_fused_logsumexp_1 = async_compile.triton('triton_poi_fused_logsumexp_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_logsumexp_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_logsumexp_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = (xindex // 4)
x1 = (xindex // 4) % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x3)), xmask)
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x3)), xmask)
tmp5 = tl.load(in_ptr0 + (8 + x0 + (16*x3)), xmask)
tmp8 = tl.load(in_ptr0 + (12 + x0 + (16*x3)), xmask)
tmp12 = tl.load(in_ptr1 + (x0 + (16*x1)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (4 + x0 + (16*x1)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (8 + x0 + (16*x1)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (12 + x0 + (16*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp13 = -tmp12
tmp14 = 1.0
tmp15 = tmp13 + tmp14
tmp16 = tmp15 + tmp14
tmp17 = 1000.0
tmp18 = tmp16 * tmp17
tmp20 = -tmp19
tmp21 = tmp20 + tmp14
tmp22 = tmp21 + tmp14
tmp23 = tmp22 * tmp17
tmp24 = triton_helpers.maximum(tmp18, tmp23)
tmp26 = -tmp25
tmp27 = tmp26 + tmp14
tmp28 = tmp27 + tmp14
tmp29 = tmp28 * tmp17
tmp30 = triton_helpers.maximum(tmp24, tmp29)
tmp32 = -tmp31
tmp33 = tmp32 + tmp14
tmp34 = tmp33 + tmp14
tmp35 = tmp34 * tmp17
tmp36 = triton_helpers.maximum(tmp30, tmp35)
tmp37 = tl_math.abs(tmp36)
tmp38 = float("inf")
tmp39 = tmp37 == tmp38
tmp40 = 0.0
tmp41 = tl.where(tmp39, tmp40, tmp36)
tmp42 = tmp11 + tmp41
tl.store(out_ptr0 + (x4), tmp42, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6r/c6rqzfcgccq5u5nv6ojqtpt6sirtpuotciz4n2wwpnq5a5d22mjl.py
# Topologically Sorted Source Nodes: [neg_1, add_5, add_6, truediv_1, logsumexp_1], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
# Source node to ATen node mapping:
# add_5 => add_6
# add_6 => add_7
# logsumexp_1 => abs_2, amax_1, eq_1, exp_1, full_default_1, sub_2, sum_2, where_1
# neg_1 => neg_1
# truediv_1 => div_1
# Graph fragment:
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg2_1,), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_1, %unsqueeze_2), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %unsqueeze_3), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_7, 0.001), kwargs = {})
# %amax_1 : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%div_1, [-1], True), kwargs = {})
# %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax_1,), kwargs = {})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_2, inf), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_1, %amax_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_1, %where_1), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1]), kwargs = {})
triton_poi_fused_add_div_logsumexp_neg_2 = async_compile.triton('triton_poi_fused_add_div_logsumexp_neg_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_logsumexp_neg_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_logsumexp_neg_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x4 = (xindex // 4)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (4*x3), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (4*x4), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (4*x4), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (1 + (4*x4)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (1 + (4*x4)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr1 + (2 + (4*x4)), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr2 + (2 + (4*x4)), xmask, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr0 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr1 + (3 + (4*x4)), xmask, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr2 + (3 + (4*x4)), xmask, eviction_policy='evict_last')
tmp1 = -tmp0
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp5 = 1e-08
tmp6 = tmp4 + tmp5
tmp7 = tl_math.log(tmp6)
tmp9 = tmp7 - tmp8
tmp10 = 0.001
tmp11 = tmp9 * tmp10
tmp12 = tmp11 + tmp2
tmp13 = tmp3 + tmp12
tmp14 = 1000.0
tmp15 = tmp13 * tmp14
tmp17 = -tmp16
tmp18 = tmp17 + tmp2
tmp20 = tmp19 + tmp5
tmp21 = tl_math.log(tmp20)
tmp23 = tmp21 - tmp22
tmp24 = tmp23 * tmp10
tmp25 = tmp24 + tmp2
tmp26 = tmp18 + tmp25
tmp27 = tmp26 * tmp14
tmp28 = triton_helpers.maximum(tmp15, tmp27)
tmp30 = -tmp29
tmp31 = tmp30 + tmp2
tmp33 = tmp32 + tmp5
tmp34 = tl_math.log(tmp33)
tmp36 = tmp34 - tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp37 + tmp2
tmp39 = tmp31 + tmp38
tmp40 = tmp39 * tmp14
tmp41 = triton_helpers.maximum(tmp28, tmp40)
tmp43 = -tmp42
tmp44 = tmp43 + tmp2
tmp46 = tmp45 + tmp5
tmp47 = tl_math.log(tmp46)
tmp49 = tmp47 - tmp48
tmp50 = tmp49 * tmp10
tmp51 = tmp50 + tmp2
tmp52 = tmp44 + tmp51
tmp53 = tmp52 * tmp14
tmp54 = triton_helpers.maximum(tmp41, tmp53)
tmp55 = tl_math.abs(tmp54)
tmp56 = float("inf")
tmp57 = tmp55 == tmp56
tmp58 = 0.0
tmp59 = tl.where(tmp57, tmp58, tmp54)
tmp60 = tmp15 - tmp59
tmp61 = tl_math.exp(tmp60)
tmp62 = tmp27 - tmp59
tmp63 = tl_math.exp(tmp62)
tmp64 = tmp61 + tmp63
tmp65 = tmp40 - tmp59
tmp66 = tl_math.exp(tmp65)
tmp67 = tmp64 + tmp66
tmp68 = tmp53 - tmp59
tmp69 = tl_math.exp(tmp68)
tmp70 = tmp67 + tmp69
tl.store(out_ptr0 + (x5), tmp54, xmask)
tl.store(out_ptr1 + (x5), tmp70, xmask)
''', device_str='cuda')
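# This kernel folds one log-domain Sinkhorn update into the next reduction:
# the refreshed potential eps * (log(mu + 1e-8) - logsumexp_prev), with
# eps = 0.001 (the multiply by 1000.0 is the division by eps), is combined
# with the negated cost and the other potential, and the row max (out_ptr0)
# and sum of exponentials (out_ptr1) are reduced on the fly, so the full
# score matrix is never materialized.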
# kernel path: runs/run_shard_0/inductor_cache/ze/czea7uhv6cskvq7io7obvzyku3nngbzgr2loac7umqp4ezkzxxmo.py
# Topologically Sorted Source Nodes: [neg_2, add_9, add_10, truediv_2], Original ATen: [aten.neg, aten.add, aten.div]
# Source node to ATen node mapping:
# add_10 => add_12
# add_9 => add_11
# neg_2 => neg_2
# truediv_2 => div_2
# Graph fragment:
# %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg2_1,), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_2, %unsqueeze_4), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %unsqueeze_5), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_12, 0.001), kwargs = {})
triton_poi_fused_add_div_neg_3 = async_compile.triton('triton_poi_fused_add_div_neg_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_neg_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_neg_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x5 = (xindex // 4)
x0 = xindex % 4
x6 = (xindex // 16)
x7 = xindex
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (x5), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr4 + (x0 + (4*x6)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr5 + (x0 + (4*x6)), xmask, eviction_policy='evict_last')
tmp1 = -tmp0
tmp3 = 1e-08
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp7 = tl_math.log(tmp6)
tmp9 = tl_math.abs(tmp8)
tmp10 = float("inf")
tmp11 = tmp9 == tmp10
tmp12 = 0.0
tmp13 = tl.where(tmp11, tmp12, tmp8)
tmp14 = tmp7 + tmp13
tmp15 = tmp5 - tmp14
tmp16 = 0.001
tmp17 = tmp15 * tmp16
tmp18 = 1.0
tmp19 = tmp17 + tmp18
tmp20 = tmp1 + tmp19
tmp22 = tmp21 + tmp3
tmp23 = tl_math.log(tmp22)
tmp25 = tmp23 - tmp24
tmp26 = tmp25 * tmp16
tmp27 = tmp26 + tmp18
tmp28 = tmp20 + tmp27
tmp29 = 1000.0
tmp30 = tmp28 * tmp29
tl.store(out_ptr0 + (x7), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hv/chvwmrpjh6iovjxnnhumvrkgigm26ipe67ftmtnsszaagjuhvg3f.py
# Topologically Sorted Source Nodes: [add_8, log_2, logsumexp_2, sub_2, mul_2], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
# Source node to ATen node mapping:
# add_8 => add_10
# log_2 => log_4
# logsumexp_2 => abs_3, add_13, amax_2, eq_2, exp_2, full_default_2, log_5, sub_4, sum_3, where_2
# mul_2 => mul_2
# sub_2 => sub_5
# Graph fragment:
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, 1e-08), kwargs = {})
# %log_4 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_10,), kwargs = {})
# %amax_2 : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%permute_1, [-1], True), kwargs = {})
# %abs_3 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax_2,), kwargs = {})
# %eq_2 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_3, inf), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq_2, %full_default_2, %amax_2), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute_1, %where_2), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_4,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [-1]), kwargs = {})
# %log_5 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_3,), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log_5, %squeeze_2), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_4, %add_13), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, 0.001), kwargs = {})
triton_poi_fused_add_log_logsumexp_mul_sub_4 = async_compile.triton('triton_poi_fused_add_log_logsumexp_mul_sub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_log_logsumexp_mul_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_log_logsumexp_mul_sub_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr1 + (x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr1 + (4 + x0 + (16*x1)), xmask)
tmp7 = tl.load(in_ptr1 + (8 + x0 + (16*x1)), xmask)
tmp9 = tl.load(in_ptr1 + (12 + x0 + (16*x1)), xmask)
tmp1 = 1e-08
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp11 = tl_math.abs(tmp10)
tmp12 = float("inf")
tmp13 = tmp11 == tmp12
tmp14 = 0.0
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = tmp4 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp5 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp9 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tl_math.log(tmp26)
tmp28 = tmp27 + tmp15
tmp29 = tmp3 - tmp28
tmp30 = 0.001
tmp31 = tmp29 * tmp30
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
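
# --- Illustrative sketch (not part of the Inductor output): the fused kernel
# above is equivalent, up to floating-point ordering, to the eager expression
#   0.001 * (torch.log(b + 1e-08) - torch.logsumexp(M.transpose(-2, -1), dim=-1))
# where `b` stands for arg1_1 and `M` for the permuted score matrix (both names
# are assumptions). The amax/abs/inf sequence is how aten.logsumexp stabilizes
# the reduction: subtract the row max unless it is infinite, exponentiate, sum,
# take the log, and add the max back. Relies on the module-level torch import.
def _ref_fused_add_log_logsumexp_mul_sub_4(b, M, eps=0.001, tiny=1e-08):
    # Eager-mode reference; never called by the generated code.
    return eps * (torch.log(b + tiny) - torch.logsumexp(M.transpose(-2, -1), dim=-1))
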
# kernel path: runs/run_shard_0/inductor_cache/7n/c7nddu4g3wl4os5pcxdjtt4dakrjo3zmnfcx7j2dpuhnhoyh5smh.py
# Topologically Sorted Source Nodes: [neg_3, add_13, add_14], Original ATen: [aten.neg, aten.add]
# Source node to ATen node mapping:
# add_13 => add_16
# add_14 => add_17
# neg_3 => neg_3
# Graph fragment:
# %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg2_1,), kwargs = {})
# %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_3, %unsqueeze_6), kwargs = {})
# %add_17 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_16, %unsqueeze_7), kwargs = {})
triton_poi_fused_add_neg_5 = async_compile.triton('triton_poi_fused_add_neg_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_neg_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_neg_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x5 = (xindex // 4)
x0 = xindex % 4
x6 = (xindex // 16)
x7 = xindex
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (x5), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr4 + (x0 + (4*x6)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr5 + (x0 + (4*x6)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x0 + (4*x6)), xmask, eviction_policy='evict_last')
tmp1 = -tmp0
tmp3 = 1e-08
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp7 = tl_math.log(tmp6)
tmp9 = tl_math.abs(tmp8)
tmp10 = float("inf")
tmp11 = tmp9 == tmp10
tmp12 = 0.0
tmp13 = tl.where(tmp11, tmp12, tmp8)
tmp14 = tmp7 + tmp13
tmp15 = tmp5 - tmp14
tmp16 = 0.001
tmp17 = tmp15 * tmp16
tmp18 = 1.0
tmp19 = tmp17 + tmp18
tmp20 = tmp1 + tmp19
tmp23 = tmp22 + tmp3
tmp24 = tl_math.log(tmp23)
tmp26 = tmp24 - tmp25
tmp27 = tmp26 * tmp16
tmp28 = tmp27 + tmp18
tmp29 = tmp21 + tmp28
tmp30 = tmp20 + tmp29
tl.store(out_ptr0 + (x7), tmp30, xmask)
''', device_str='cuda')
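
# --- Illustrative sketch (assumption about the original module): this fusion
# assembles the next score matrix, conceptually
#   M = -C + u.unsqueeze(-1) + v.unsqueeze(-2)
# with `C` standing for arg2_1 and `u`, `v` for the two potentials; the exact
# broadcast axes follow %unsqueeze_6/%unsqueeze_7 above. Rather than reading u
# and v from memory, the kernel recomputes them inline from their log/logsumexp
# chains, which is why it takes seven input pointers.
def _ref_assemble_scores(C, u, v):
    # Eager-mode reference for the add/neg pattern; never called.
    return -C + u.unsqueeze(-1) + v.unsqueeze(-2)
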
# kernel path: runs/run_shard_0/inductor_cache/bi/cbixxetzrqx4nrw2ahqrntjc2hteggxk46fvxehrnxg3yu6nftdi.py
# Topologically Sorted Source Nodes: [add_12, log_3, truediv_3, logsumexp_3, sub_3], Original ATen: [aten.add, aten.log, aten.div, aten.logsumexp, aten.sub]
# Source node to ATen node mapping:
# add_12 => add_15
# log_3 => log_6
# logsumexp_3 => abs_4, add_18, amax_3, eq_3, exp_3, full_default_3, log_7, sub_6, sum_4, where_3
# sub_3 => sub_7
# truediv_3 => div_3
# Graph fragment:
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-08), kwargs = {})
# %log_6 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_15,), kwargs = {})
# %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_17, 0.001), kwargs = {})
# %amax_3 : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%div_3, [-1], True), kwargs = {})
# %abs_4 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax_3,), kwargs = {})
# %eq_3 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_4, inf), kwargs = {})
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq_3, %full_default_3, %amax_3), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_3, %where_3), kwargs = {})
# %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_6,), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_3, [-1]), kwargs = {})
# %log_7 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_4,), kwargs = {})
# %add_18 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log_7, %squeeze_3), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_6, %add_18), kwargs = {})
triton_poi_fused_add_div_log_logsumexp_sub_6 = async_compile.triton('triton_poi_fused_add_div_log_logsumexp_sub_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_log_logsumexp_sub_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_log_logsumexp_sub_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 1e-08
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp5 = 1000.0
tmp6 = tmp4 * tmp5
tmp8 = tmp7 * tmp5
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp10 * tmp5
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp5
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp16 = tl_math.abs(tmp15)
tmp17 = float("inf")
tmp18 = tmp16 == tmp17
tmp19 = 0.0
tmp20 = tl.where(tmp18, tmp19, tmp15)
tmp21 = tmp6 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp8 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp11 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp14 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tmp32 = tl_math.log(tmp31)
tmp33 = tmp32 + tmp20
tmp34 = tmp3 - tmp33
tl.store(out_ptr0 + (x0), tmp34, xmask)
''', device_str='cuda')
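
# --- Illustrative note: the same stabilized-logsumexp pattern as the sketch
# after triton_poi_fused_add_log_logsumexp_mul_sub_4, but reducing over the
# last axis and with the division by 0.001 fused in as a multiplication by
# 1000.0, i.e. (names assumed)
#   torch.log(a + 1e-08) - torch.logsumexp(M / 0.001, dim=-1)
# where `a` stands for arg0_1. The 0.001 rescaling of this result is applied
# by a later kernel rather than here.
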
# kernel path: runs/run_shard_0/inductor_cache/ca/cca6rupfie6mlwsla27ymcldhzidikxwjidfhdzssqpb3ftkl23h.py
# Topologically Sorted Source Nodes: [neg_4, add_17, add_18], Original ATen: [aten.neg, aten.add]
# Source node to ATen node mapping:
# add_17 => add_21
# add_18 => add_22
# neg_4 => neg_4
# Graph fragment:
# %neg_4 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg2_1,), kwargs = {})
# %add_21 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_4, %unsqueeze_8), kwargs = {})
# %add_22 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_21, %unsqueeze_9), kwargs = {})
triton_poi_fused_add_neg_7 = async_compile.triton('triton_poi_fused_add_neg_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_neg_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_neg_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x5 = (xindex // 4)
x0 = xindex % 4
x6 = (xindex // 16)
x7 = xindex
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + (x5), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x5), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr5 + (x0 + (4*x6)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x0 + (4*x6)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr7 + (x0 + (4*x6)), xmask, eviction_policy='evict_last')
tmp1 = -tmp0
tmp3 = 0.001
tmp4 = tmp2 * tmp3
tmp6 = 1e-08
tmp7 = tmp5 + tmp6
tmp8 = tl_math.log(tmp7)
tmp10 = tl_math.log(tmp9)
tmp12 = tl_math.abs(tmp11)
tmp13 = float("inf")
tmp14 = tmp12 == tmp13
tmp15 = 0.0
tmp16 = tl.where(tmp14, tmp15, tmp11)
tmp17 = tmp10 + tmp16
tmp18 = tmp8 - tmp17
tmp19 = tmp18 * tmp3
tmp20 = 1.0
tmp21 = tmp19 + tmp20
tmp22 = tmp4 + tmp21
tmp23 = tmp1 + tmp22
tmp26 = tmp25 + tmp6
tmp27 = tl_math.log(tmp26)
tmp29 = tmp27 - tmp28
tmp30 = tmp29 * tmp3
tmp31 = tmp30 + tmp20
tmp32 = tmp24 + tmp31
tmp33 = tmp23 + tmp32
tl.store(out_ptr0 + (x7), tmp33, xmask)
''', device_str='cuda')
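
# --- Illustrative note: structurally the same score assembly as
# triton_poi_fused_add_neg_5 (-C plus the two broadcast potentials), one
# iteration later. The inlined recomputation of the potentials now carries an
# extra eps-scaled correction term from the previous update, which is why the
# load count grows from seven to eight input pointers.
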
# kernel path: runs/run_shard_0/inductor_cache/gz/cgzulrk3x4igaueedi6oycxwbq53a6bzda7g7pe3zd6ewa6ovidq.py
# Topologically Sorted Source Nodes: [add_16, log_4, logsumexp_4, sub_4], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub]
# Source node to ATen node mapping:
# add_16 => add_20
# log_4 => log_8
# logsumexp_4 => abs_5, add_23, amax_4, eq_4, exp_4, full_default_4, log_9, sub_8, sum_5, where_4
# sub_4 => sub_9
# Graph fragment:
# %add_20 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, 1e-08), kwargs = {})
# %log_8 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_20,), kwargs = {})
# %amax_4 : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%permute_2, [-1], True), kwargs = {})
# %abs_5 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax_4,), kwargs = {})
# %eq_4 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_5, inf), kwargs = {})
# %full_default_4 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq_4, %full_default_4, %amax_4), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute_2, %where_4), kwargs = {})
# %exp_4 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_8,), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_4, [-1]), kwargs = {})
# %log_9 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_5,), kwargs = {})
# %add_23 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log_9, %squeeze_4), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_8, %add_23), kwargs = {})
triton_poi_fused_add_log_logsumexp_sub_8 = async_compile.triton('triton_poi_fused_add_log_logsumexp_sub_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_log_logsumexp_sub_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_log_logsumexp_sub_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr1 + (x0 + (16*x1)), xmask)
tmp7 = tl.load(in_ptr1 + (4 + x0 + (16*x1)), xmask)
tmp10 = tl.load(in_ptr1 + (8 + x0 + (16*x1)), xmask)
tmp13 = tl.load(in_ptr1 + (12 + x0 + (16*x1)), xmask)
tmp1 = 1e-08
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp5 = 1000.0
tmp6 = tmp4 * tmp5
tmp8 = tmp7 * tmp5
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp10 * tmp5
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp5
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp16 = tl_math.abs(tmp15)
tmp17 = float("inf")
tmp18 = tmp16 == tmp17
tmp19 = 0.0
tmp20 = tl.where(tmp18, tmp19, tmp15)
tmp21 = tmp6 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp8 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp11 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp14 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tmp32 = tl_math.log(tmp31)
tmp33 = tmp32 + tmp20
tmp34 = tmp3 - tmp33
tl.store(out_ptr0 + (x2), tmp34, xmask)
''', device_str='cuda')
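
# --- Illustrative note: the column-side counterpart of
# triton_poi_fused_add_div_log_logsumexp_sub_6 -- identical arithmetic, but the
# x0 + 16*x1 load pattern reduces over the transposed block (%permute_2), i.e.
# a logsumexp over dim -2 instead of dim -1 in eager terms.
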
# kernel path: runs/run_shard_0/inductor_cache/fr/cfrzf3dzodfhol34gqnh3lgbfu3meen4owwolqxv76dlogteiyxr.py
# Topologically Sorted Source Nodes: [neg_5, add_21, add_22], Original ATen: [aten.neg, aten.add]
# Source node to ATen node mapping:
# add_21 => add_26
# add_22 => add_27
# neg_5 => neg_5
# Graph fragment:
# %neg_5 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg2_1,), kwargs = {})
# %add_26 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_5, %unsqueeze_10), kwargs = {})
# %add_27 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_26, %unsqueeze_11), kwargs = {})
triton_poi_fused_add_neg_9 = async_compile.triton('triton_poi_fused_add_neg_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_neg_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_neg_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x5 = (xindex // 4)
x0 = xindex % 4
x6 = (xindex // 16)
x7 = xindex
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + (x5), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x5), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr5 + (x0 + (4*x6)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr6 + (x0 + (4*x6)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr7 + (x0 + (4*x6)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr8 + (x0 + (4*x6)), xmask, eviction_policy='evict_last')
tmp1 = -tmp0
tmp3 = 0.001
tmp4 = tmp2 * tmp3
tmp6 = 1e-08
tmp7 = tmp5 + tmp6
tmp8 = tl_math.log(tmp7)
tmp10 = tl_math.log(tmp9)
tmp12 = tl_math.abs(tmp11)
tmp13 = float("inf")
tmp14 = tmp12 == tmp13
tmp15 = 0.0
tmp16 = tl.where(tmp14, tmp15, tmp11)
tmp17 = tmp10 + tmp16
tmp18 = tmp8 - tmp17
tmp19 = tmp18 * tmp3
tmp20 = 1.0
tmp21 = tmp19 + tmp20
tmp22 = tmp4 + tmp21
tmp23 = tmp1 + tmp22
tmp25 = tmp24 * tmp3
tmp28 = tmp27 + tmp6
tmp29 = tl_math.log(tmp28)
tmp31 = tmp29 - tmp30
tmp32 = tmp31 * tmp3
tmp33 = tmp32 + tmp20
tmp34 = tmp26 + tmp33
tmp35 = tmp25 + tmp34
tmp36 = tmp23 + tmp35
tl.store(out_ptr0 + (x7), tmp36, xmask)
''', device_str='cuda')
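
# --- Illustrative note: the next instance of the score-assembly fusion (see
# triton_poi_fused_add_neg_5); each further iteration folds one more eps-scaled
# logsumexp correction into the inlined potentials, so the input-pointer count
# keeps growing (nine here) while the stored expression remains -C plus the two
# broadcast potentials over the same axes.
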
# kernel path: runs/run_shard_0/inductor_cache/oc/coca7wqtnc3a7ocfrkrf7hsw67zgc4ajcedofxuxw36bia66daws.py
# Topologically Sorted Source Nodes: [add_20, log_5, add_4, log_1, u, logsumexp_1, sub_1, mul_1, u_1, mul_3, u_2, truediv_5, logsumexp_5, sub_5, mul_5, u_3], Original ATen: [aten.add, aten.log, aten.ones_like, aten.logsumexp, aten.sub, aten.mul, aten.div]
# Source node to ATen node mapping:
# add_20 => add_25
# add_4 => add_5
# log_1 => log_2
# log_5 => log_10
# logsumexp_1 => add_8, log_3
# logsumexp_5 => abs_6, add_28, amax_5, eq_5, exp_5, full_default_5, log_11, sub_10, sum_6, where_5
# mul_1 => mul_1
# mul_3 => mul_3
# mul_5 => mul_5
# sub_1 => sub_3
# sub_5 => sub_11
# truediv_5 => div_5
# u => full
# u_1 => add_9
# u_2 => add_19
# u_3 => add_29
# Graph fragment:
# %add_25 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-08), kwargs = {})
# %log_10 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_25,), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-08), kwargs = {})
# %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_5,), kwargs = {})
# %full : [num_users=3] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %log_3 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log_3, %squeeze_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_2, %add_8), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, 0.001), kwargs = {})
# %add_9 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %full), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, 0.001), kwargs = {})
# %add_19 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %add_9), kwargs = {})
# %div_5 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_27, 0.001), kwargs = {})
# %amax_5 : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%div_5, [-1], True), kwargs = {})
# %abs_6 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax_5,), kwargs = {})
# %eq_5 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_6, inf), kwargs = {})
# %full_default_5 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq_5, %full_default_5, %amax_5), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_5, %where_5), kwargs = {})
# %exp_5 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_10,), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_5, [-1]), kwargs = {})
# %log_11 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_6,), kwargs = {})
# %add_28 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log_11, %squeeze_5), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_10, %add_28), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, 0.001), kwargs = {})
# %add_29 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %add_19), kwargs = {})
triton_poi_fused_add_div_log_logsumexp_mul_ones_like_sub_10 = async_compile.triton('triton_poi_fused_add_div_log_logsumexp_mul_ones_like_sub_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_log_logsumexp_mul_ones_like_sub_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_log_logsumexp_mul_ones_like_sub_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr2 + (x0), xmask)
tmp39 = tl.load(in_ptr3 + (x0), xmask)
tmp41 = tl.load(in_ptr4 + (x0), xmask)
tmp1 = 1e-08
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp5 = 1000.0
tmp6 = tmp4 * tmp5
tmp8 = tmp7 * tmp5
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp10 * tmp5
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp5
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp16 = tl_math.abs(tmp15)
tmp17 = float("inf")
tmp18 = tmp16 == tmp17
tmp19 = 0.0
tmp20 = tl.where(tmp18, tmp19, tmp15)
tmp21 = tmp6 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp8 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp11 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp14 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tmp32 = tl_math.log(tmp31)
tmp33 = tmp32 + tmp20
tmp34 = tmp3 - tmp33
tmp35 = 0.001
tmp36 = tmp34 * tmp35
tmp38 = tmp37 * tmp35
tmp40 = tl_math.log(tmp39)
tmp42 = tl_math.abs(tmp41)
tmp43 = tmp42 == tmp17
tmp44 = tl.where(tmp43, tmp19, tmp41)
tmp45 = tmp40 + tmp44
tmp46 = tmp3 - tmp45
tmp47 = tmp46 * tmp35
tmp48 = 1.0
tmp49 = tmp47 + tmp48
tmp50 = tmp38 + tmp49
tmp51 = tmp36 + tmp50
tl.store(in_out_ptr0 + (x0), tmp51, xmask)
''', device_str='cuda')
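
# --- Illustrative sketch (assumption about the surrounding module): the
# running sums u, u_1, u_2, u_3 accumulated by this in-place kernel look like
# log-domain Sinkhorn potentials with temperature eps = 0.001, starting from
# u = ones_like(a) (the aten.ones_like / %full in the fragment above). One
# full update under those assumed names:
def _ref_sinkhorn_u_step(C, a, u, v, eps=0.001, tiny=1e-08):
    # M is the rescaled score matrix; torch.logsumexp is the stabilized
    # reduction that the generated kernels spell out with amax/exp/sum/log.
    M = (-C + u.unsqueeze(-1) + v.unsqueeze(-2)) / eps
    return u + eps * (torch.log(a + tiny) - torch.logsumexp(M, dim=-1))
# Each kernel in this region fuses one such step (and often pieces of the
# previous one) into a single pointwise pass over the 4x4x4x4 operands.
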
# kernel path: runs/run_shard_0/inductor_cache/mj/cmjw34ke5wd4x3bj6mevqs4p6nmbfsdozybw3yqw4omfpgxl23rl.py
# Topologically Sorted Source Nodes: [neg_6, add_25, add_26, truediv_6], Original ATen: [aten.neg, aten.add, aten.div]
# Source node to ATen node mapping:
# add_25 => add_31
# add_26 => add_32
# neg_6 => neg_6
# truediv_6 => div_6
# Graph fragment:
# %neg_6 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg2_1,), kwargs = {})
# %add_31 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_6, %unsqueeze_12), kwargs = {})
# %add_32 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_31, %unsqueeze_13), kwargs = {})
# %div_6 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_32, 0.001), kwargs = {})
triton_poi_fused_add_div_neg_11 = async_compile.triton('triton_poi_fused_add_div_neg_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_neg_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_neg_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x5 = (xindex // 4)
x0 = xindex % 4
x6 = (xindex // 16)
x7 = xindex
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x0 + (4*x6)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x0 + (4*x6)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr4 + (x0 + (4*x6)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0 + (4*x6)), xmask, eviction_policy='evict_last')
tmp1 = -tmp0
tmp3 = tmp1 + tmp2
tmp5 = 0.001
tmp6 = tmp4 * tmp5
tmp9 = 1e-08
tmp10 = tmp8 + tmp9
tmp11 = tl_math.log(tmp10)
tmp13 = tmp11 - tmp12
tmp14 = tmp13 * tmp5
tmp15 = 1.0
tmp16 = tmp14 + tmp15
tmp17 = tmp7 + tmp16
tmp18 = tmp6 + tmp17
tmp19 = tmp3 + tmp18
tmp20 = 1000.0
tmp21 = tmp19 * tmp20
tl.store(out_ptr0 + (x7), tmp21, xmask)
''', device_str='cuda')
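
# --- Illustrative note: here the assembled scores are also rescaled in the
# same pass -- the trailing multiplication by 1000.0 implements the aten.div
# by 0.001 (%div_6), so the stored tensor is (-C + u + v) / eps, ready for the
# stabilized logsumexp that follows.
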
# kernel path: runs/run_shard_0/inductor_cache/6a/c6aymikoc7tbbul6sz4odmaxfvudjkybpbx4x5ppjfkzf5pw5uyr.py
# Topologically Sorted Source Nodes: [add, log, v, sub, mul, v_1, v_2, mul_4, v_3, add_24, log_6, logsumexp_6, sub_6, mul_6, v_4], Original ATen: [aten.add, aten.log, aten.ones_like, aten.sub, aten.mul, aten.logsumexp]
# Source node to ATen node mapping:
# add => add
# add_24 => add_30
# log => log
# log_6 => log_12
# logsumexp_6 => abs_7, add_33, amax_6, eq_6, exp_6, full_default_6, log_13, sub_12, sum_7, where_6
# mul => mul
# mul_4 => mul_4
# mul_6 => mul_6
# sub => sub_1
# sub_6 => sub_13
# v => full_1
# v_1 => add_4
# v_2 => add_14
# v_3 => add_24
# v_4 => add_34
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, 1e-08), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %full_1 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log, %add_3), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, 0.001), kwargs = {})
# %add_4 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %full_1), kwargs = {})
# %add_14 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %add_4), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, 0.001), kwargs = {})
# %add_24 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %add_14), kwargs = {})
# %add_30 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, 1e-08), kwargs = {})
# %log_12 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_30,), kwargs = {})
# %amax_6 : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%permute_3, [-1], True), kwargs = {})
# %abs_7 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax_6,), kwargs = {})
# %eq_6 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_7, inf), kwargs = {})
# %full_default_6 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq_6, %full_default_6, %amax_6), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute_3, %where_6), kwargs = {})
# %exp_6 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_12,), kwargs = {})
# %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_6, [-1]), kwargs = {})
# %log_13 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_7,), kwargs = {})
# %add_33 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log_13, %squeeze_6), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_12, %add_33), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_13, 0.001), kwargs = {})
# %add_34 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %add_24), kwargs = {})
triton_poi_fused_add_log_logsumexp_mul_ones_like_sub_12 = async_compile.triton('triton_poi_fused_add_log_logsumexp_mul_ones_like_sub_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_log_logsumexp_mul_ones_like_sub_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_log_logsumexp_mul_ones_like_sub_12(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr1 + (x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr1 + (4 + x0 + (16*x1)), xmask)
tmp7 = tl.load(in_ptr1 + (8 + x0 + (16*x1)), xmask)
tmp9 = tl.load(in_ptr1 + (12 + x0 + (16*x1)), xmask)
tmp32 = tl.load(in_ptr2 + (x2), xmask)
tmp34 = tl.load(in_ptr3 + (x2), xmask)
tmp35 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = 1e-08
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp11 = tl_math.abs(tmp10)
tmp12 = float("inf")
tmp13 = tmp11 == tmp12
tmp14 = 0.0
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = tmp4 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp5 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp9 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tl_math.log(tmp26)
tmp28 = tmp27 + tmp15
tmp29 = tmp3 - tmp28
tmp30 = 0.001
tmp31 = tmp29 * tmp30
tmp33 = tmp32 * tmp30
tmp36 = tmp3 - tmp35
tmp37 = tmp36 * tmp30
tmp38 = 1.0
tmp39 = tmp37 + tmp38
tmp40 = tmp34 + tmp39
tmp41 = tmp33 + tmp40
tmp42 = tmp31 + tmp41
tl.store(in_out_ptr0 + (x2), tmp42, xmask)
''', device_str='cuda')
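
# --- Illustrative note: the v-side accumulation mirroring
# triton_poi_fused_add_div_log_logsumexp_mul_ones_like_sub_10: the same
# log/logsumexp correction, reduced over the transposed block (%permute_3) and
# added onto the running potential. mutated_arg_names=['in_out_ptr0'] records
# that the update happens in place on the existing buffer instead of
# allocating a new one.
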
# kernel path: runs/run_shard_0/inductor_cache/4l/c4lai2yycampjk7i6ecg773idwngnfmdni3nk2mn3226hgxy56ys.py
# Topologically Sorted Source Nodes: [neg_7, add_29, add_30, truediv_7, logsumexp_7], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
# Source node to ATen node mapping:
# add_29 => add_36
# add_30 => add_37
# logsumexp_7 => abs_8, amax_7, eq_7, exp_7, full_default_7, sub_14, sum_8, where_7
# neg_7 => neg_7
# truediv_7 => div_7
# Graph fragment:
# %neg_7 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg2_1,), kwargs = {})
# %add_36 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_7, %unsqueeze_14), kwargs = {})
# %add_37 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_36, %unsqueeze_15), kwargs = {})
# %div_7 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_37, 0.001), kwargs = {})
# %amax_7 : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%div_7, [-1], True), kwargs = {})
# %abs_8 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax_7,), kwargs = {})
# %eq_7 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_8, inf), kwargs = {})
# %full_default_7 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq_7, %full_default_7, %amax_7), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_7, %where_7), kwargs = {})
# %exp_7 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_14,), kwargs = {})
# %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_7, [-1]), kwargs = {})
triton_poi_fused_add_div_logsumexp_neg_13 = async_compile.triton('triton_poi_fused_add_div_logsumexp_neg_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_logsumexp_neg_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_logsumexp_neg_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x4 = xindex
x5 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (4*x3), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x4), xmask)
tmp4 = tl.load(in_ptr2 + (4*x5), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (1 + (4*x5)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr2 + (2 + (4*x5)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (3 + (4*x5)), xmask, eviction_policy='evict_last')
tmp1 = -tmp0
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp6 = 1000.0
tmp7 = tmp5 * tmp6
tmp9 = -tmp8
tmp10 = tmp9 + tmp2
tmp12 = tmp10 + tmp11
tmp13 = tmp12 * tmp6
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp16 = -tmp15
tmp17 = tmp16 + tmp2
tmp19 = tmp17 + tmp18
tmp20 = tmp19 * tmp6
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp23 = -tmp22
tmp24 = tmp23 + tmp2
tmp26 = tmp24 + tmp25
tmp27 = tmp26 * tmp6
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tl_math.abs(tmp28)
tmp30 = float("inf")
tmp31 = tmp29 == tmp30
tmp32 = 0.0
tmp33 = tl.where(tmp31, tmp32, tmp28)
tmp34 = tmp7 - tmp33
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp13 - tmp33
tmp37 = tl_math.exp(tmp36)
tmp38 = tmp35 + tmp37
tmp39 = tmp20 - tmp33
tmp40 = tl_math.exp(tmp39)
tmp41 = tmp38 + tmp40
tmp42 = tmp27 - tmp33
tmp43 = tl_math.exp(tmp42)
tmp44 = tmp41 + tmp43
tl.store(out_ptr0 + (x4), tmp28, xmask)
tl.store(out_ptr1 + (x4), tmp44, xmask)
''', device_str='cuda')
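
# --- Illustrative note: unlike the earlier fusions, this kernel stops the
# stabilized logsumexp halfway and stores two tensors: out_ptr0 receives the
# raw row max and out_ptr1 the sum of exponentials. In eager terms (names
# assumed, using the module-level torch import):
#   m = M.amax(dim=-1)
#   m0 = torch.where(m.abs() == float("inf"), torch.zeros_like(m), m)
#   s = torch.exp(M - m0.unsqueeze(-1)).sum(dim=-1)
# A downstream kernel then finishes with torch.log(s) + m0.
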
# kernel path: runs/run_shard_0/inductor_cache/2u/c2u64bmzdzgqfkqew3bb6v7h6p5rzfe5yxkyqixptap5pzokklfw.py
# Topologically Sorted Source Nodes: [neg_8, add_33, add_34, truediv_8], Original ATen: [aten.neg, aten.add, aten.div]
# Source node to ATen node mapping:
# add_33 => add_41
# add_34 => add_42
# neg_8 => neg_8
# truediv_8 => div_8
# Graph fragment:
# %neg_8 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg2_1,), kwargs = {})
# %add_41 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_8, %unsqueeze_16), kwargs = {})
# %add_42 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_41, %unsqueeze_17), kwargs = {})
# %div_8 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_42, 0.001), kwargs = {})
triton_poi_fused_add_div_neg_14 = async_compile.triton('triton_poi_fused_add_div_neg_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_neg_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_neg_14(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x5 = (xindex // 4)
x0 = xindex % 4
x6 = (xindex // 16)
x7 = xindex
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (x5), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr4 + (x5), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr5 + (x0 + (4*x6)), xmask, eviction_policy='evict_last')
tmp1 = -tmp0
tmp3 = 1e-08
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp7 = tl_math.log(tmp6)
tmp9 = tl_math.abs(tmp8)
tmp10 = float("inf")
tmp11 = tmp9 == tmp10
tmp12 = 0.0
tmp13 = tl.where(tmp11, tmp12, tmp8)
tmp14 = tmp7 + tmp13
tmp15 = tmp5 - tmp14
tmp16 = 0.001
tmp17 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tmp20 = tmp1 + tmp19
tmp22 = tmp20 + tmp21
tmp23 = 1000.0
tmp24 = tmp22 * tmp23
tl.store(out_ptr0 + (x7), tmp24, xmask)
''', device_str='cuda')
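
# --- Illustrative note: the same assemble-and-rescale pattern as
# triton_poi_fused_add_div_neg_11, but with part of the potential's
# recomputation chain already folded into in_ptr4, so fewer log/exp terms are
# re-derived inline before the final multiplication by 1000.0 (= division by
# eps).
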
# kernel path: runs/run_shard_0/inductor_cache/6i/c6iqzu2sohlumuwlrnagpl43zfy5rslhjxg5ykeo47r34psnns3t.py
# Topologically Sorted Source Nodes: [neg_9, add_37, add_38, truediv_9], Original ATen: [aten.neg, aten.add, aten.div]
# Source node to ATen node mapping:
# add_37 => add_46
# add_38 => add_47
# neg_9 => neg_9
# truediv_9 => div_9
# Graph fragment:
# %neg_9 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg2_1,), kwargs = {})
# %add_46 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_9, %unsqueeze_18), kwargs = {})
# %add_47 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_46, %unsqueeze_19), kwargs = {})
# %div_9 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_47, 0.001), kwargs = {})
triton_poi_fused_add_div_neg_15 = async_compile.triton('triton_poi_fused_add_div_neg_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_neg_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_neg_15(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x5 = (xindex // 4)
x0 = xindex % 4
x6 = (xindex // 16)
x7 = xindex
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (x5), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr4 + (x5), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr5 + (x0 + (4*x6)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr6 + (x0 + (4*x6)), xmask, eviction_policy='evict_last')
tmp1 = -tmp0
tmp3 = 1e-08
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp7 = tl_math.log(tmp6)
tmp9 = tl_math.abs(tmp8)
tmp10 = float("inf")
tmp11 = tmp9 == tmp10
tmp12 = 0.0
tmp13 = tl.where(tmp11, tmp12, tmp8)
tmp14 = tmp7 + tmp13
tmp15 = tmp5 - tmp14
tmp16 = 0.001
tmp17 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tmp20 = tmp1 + tmp19
tmp23 = tmp21 + tmp22
tmp24 = tmp20 + tmp23
tmp25 = 1000.0
tmp26 = tmp24 * tmp25
tl.store(out_ptr0 + (x7), tmp26, xmask)
''', device_str='cuda')
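
# --- Illustrative note: as triton_poi_fused_add_div_neg_14, with one more
# broadcast term (in_ptr6) added on the v side before the 1000.0 rescale; the
# stored result feeds the stabilized logsumexp in the next fusion.
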
# kernel path: runs/run_shard_0/inductor_cache/6w/c6w4ndukwj3ikc5ysiij6rwh3nfkomjlaoykrfwucgjefvpj2qhi.py
# Topologically Sorted Source Nodes: [add_36, log_9, add_28, log_7, logsumexp_7, sub_7, mul_7, u_4, logsumexp_9, sub_9, mul_9, u_5], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
# Source node to ATen node mapping:
# add_28 => add_35
# add_36 => add_45
# log_7 => log_14
# log_9 => log_18
# logsumexp_7 => add_38, log_15
# logsumexp_9 => abs_10, add_48, amax_9, eq_9, exp_9, full_default_9, log_19, sub_18, sum_10, where_9
# mul_7 => mul_7
# mul_9 => mul_9
# sub_7 => sub_15
# sub_9 => sub_19
# u_4 => add_39
# u_5 => add_49
# Graph fragment:
# %add_45 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-08), kwargs = {})
# %log_18 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_45,), kwargs = {})
# %add_35 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-08), kwargs = {})
# %log_14 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_35,), kwargs = {})
# %log_15 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_8,), kwargs = {})
# %add_38 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log_15, %squeeze_7), kwargs = {})
# %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_14, %add_38), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_15, 0.001), kwargs = {})
# %add_39 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %add_29), kwargs = {})
# %amax_9 : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%div_9, [-1], True), kwargs = {})
# %abs_10 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax_9,), kwargs = {})
# %eq_9 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_10, inf), kwargs = {})
# %full_default_9 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_9 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq_9, %full_default_9, %amax_9), kwargs = {})
# %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_9, %where_9), kwargs = {})
# %exp_9 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_18,), kwargs = {})
# %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_9, [-1]), kwargs = {})
# %log_19 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_10,), kwargs = {})
# %add_48 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log_19, %squeeze_9), kwargs = {})
# %sub_19 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_18, %add_48), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_19, 0.001), kwargs = {})
# %add_49 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_9, %add_39), kwargs = {})
triton_poi_fused_add_log_logsumexp_mul_sub_16 = async_compile.triton('triton_poi_fused_add_log_logsumexp_mul_sub_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_log_logsumexp_mul_sub_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_log_logsumexp_mul_sub_16(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + (x0), xmask)
tmp34 = tl.load(in_ptr3 + (x0), xmask)
tmp41 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = 1e-08
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp11 = tl_math.abs(tmp10)
tmp12 = float("inf")
tmp13 = tmp11 == tmp12
tmp14 = 0.0
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = tmp4 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp5 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp9 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tl_math.log(tmp26)
tmp28 = tmp27 + tmp15
tmp29 = tmp3 - tmp28
tmp30 = 0.001
tmp31 = tmp29 * tmp30
tmp33 = tl_math.log(tmp32)
tmp35 = tl_math.abs(tmp34)
tmp36 = tmp35 == tmp12
tmp37 = tl.where(tmp36, tmp14, tmp34)
tmp38 = tmp33 + tmp37
tmp39 = tmp3 - tmp38
tmp40 = tmp39 * tmp30
tmp42 = tmp40 + tmp41
tmp43 = tmp31 + tmp42
tl.store(in_out_ptr0 + (x0), tmp43, xmask)
''', device_str='cuda')
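
# --- Illustrative reference only (not emitted by Inductor) ---
# A minimal sketch of the dual update that triton_poi_fused_add_log_logsumexp_mul_sub_16
# fuses, assuming the eps = 0.001 hard-coded above. The kernel applies this
# update twice per launch: one logsumexp is recomputed inline from in_ptr1,
# the other reassembled from the precomputed sum/amax pair in in_ptr2/in_ptr3.
# The names mu, scores and u_prev are hypothetical; torch is imported at the
# top of this file.
def _sinkhorn_u_update_reference(mu, scores, u_prev, eps=0.001):
    # Numerically stable logsumexp over the last dim, with the same inf-guard
    # as the kernel: a non-finite row maximum is replaced by 0 before the
    # subtraction so exp() cannot overflow or produce NaN.
    m = scores.amax(dim=-1, keepdim=True)
    m = torch.where(m.abs() == float("inf"), torch.zeros_like(m), m)
    lse = (scores - m).exp().sum(dim=-1).log() + m.squeeze(-1)
    return eps * ((mu + 1e-08).log() - lse) + u_prev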
# kernel path: runs/run_shard_0/inductor_cache/xv/cxvif4wmp5wras5frqlnhluq6kv5gfytx2ox7xvlqu4woxwboal4.py
# Topologically Sorted Source Nodes: [v_5, add_40, log_10, logsumexp_10, sub_10, mul_10, v_6], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
# Source node to ATen node mapping:
# add_40 => add_50
# log_10 => log_20
# logsumexp_10 => abs_11, add_53, amax_10, eq_10, exp_10, full_default_10, log_21, sub_20, sum_11, where_10
# mul_10 => mul_10
# sub_10 => sub_21
# v_5 => add_44
# v_6 => add_54
# Graph fragment:
# %add_44 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_8, %add_34), kwargs = {})
# %add_50 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, 1e-08), kwargs = {})
# %log_20 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_50,), kwargs = {})
# %amax_10 : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%permute_5, [-1], True), kwargs = {})
# %abs_11 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax_10,), kwargs = {})
# %eq_10 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_11, inf), kwargs = {})
# %full_default_10 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq_10, %full_default_10, %amax_10), kwargs = {})
# %sub_20 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute_5, %where_10), kwargs = {})
# %exp_10 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_20,), kwargs = {})
# %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_10, [-1]), kwargs = {})
# %log_21 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_11,), kwargs = {})
# %add_53 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log_21, %squeeze_10), kwargs = {})
# %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_20, %add_53), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_21, 0.001), kwargs = {})
# %add_54 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_10, %add_44), kwargs = {})
triton_poi_fused_add_log_logsumexp_mul_sub_17 = async_compile.triton('triton_poi_fused_add_log_logsumexp_mul_sub_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_log_logsumexp_mul_sub_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_log_logsumexp_mul_sub_17(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 16
x4 = (xindex // 4)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (4*x4), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x3), xmask)
tmp5 = tl.load(in_ptr3 + (x3), xmask)
tmp10 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (1 + (4*x4)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (2 + (4*x4)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + (3 + (4*x4)), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr4 + (x3), xmask)
tmp1 = -tmp0
tmp3 = tmp1 + tmp2
tmp6 = tmp4 + tmp5
tmp7 = tmp3 + tmp6
tmp8 = 1000.0
tmp9 = tmp7 * tmp8
tmp11 = -tmp10
tmp13 = tmp11 + tmp12
tmp14 = tmp13 + tmp6
tmp15 = tmp14 * tmp8
tmp16 = triton_helpers.maximum(tmp9, tmp15)
tmp18 = -tmp17
tmp20 = tmp18 + tmp19
tmp21 = tmp20 + tmp6
tmp22 = tmp21 * tmp8
tmp23 = triton_helpers.maximum(tmp16, tmp22)
tmp25 = -tmp24
tmp27 = tmp25 + tmp26
tmp28 = tmp27 + tmp6
tmp29 = tmp28 * tmp8
tmp30 = triton_helpers.maximum(tmp23, tmp29)
tmp31 = tl_math.abs(tmp30)
tmp32 = float("inf")
tmp33 = tmp31 == tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp33, tmp34, tmp30)
tmp36 = tmp9 - tmp35
tmp37 = tl_math.exp(tmp36)
tmp38 = tmp15 - tmp35
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp37 + tmp39
tmp41 = tmp22 - tmp35
tmp42 = tl_math.exp(tmp41)
tmp43 = tmp40 + tmp42
tmp44 = tmp29 - tmp35
tmp45 = tl_math.exp(tmp44)
tmp46 = tmp43 + tmp45
tmp48 = 1e-08
tmp49 = tmp47 + tmp48
tmp50 = tl_math.log(tmp49)
tmp51 = tl_math.log(tmp46)
tmp52 = tmp51 + tmp35
tmp53 = tmp50 - tmp52
tmp54 = 0.001
tmp55 = tmp53 * tmp54
tmp56 = tmp55 + tmp6
tl.store(in_out_ptr0 + (x3), tmp56, xmask)
''', device_str='cuda')
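
# --- Illustrative reference only (not emitted by Inductor) ---
# A minimal sketch of the column-side (v) update that
# triton_poi_fused_add_log_logsumexp_mul_sub_17 fuses. The reduction runs over
# the row index (hence the %permute_5 node above), and the kernel multiplies
# by 1000.0 rather than dividing by eps = 0.001. Which dual broadcasts along
# which axis is an assumption; nu, cost, u and v_prev are hypothetical names.
def _sinkhorn_v_update_reference(nu, cost, u, v_prev, eps=0.001):
    # scores[..., i, j] = (-cost[..., i, j] + u[..., i] + v_prev[..., j]) / eps
    scores = (-cost + u.unsqueeze(-1) + v_prev.unsqueeze(-2)) / eps
    # Stable logsumexp over the row index i, inf-guarded as in the kernel.
    m = scores.amax(dim=-2, keepdim=True)
    m = torch.where(m.abs() == float("inf"), torch.zeros_like(m), m)
    lse = (scores - m).exp().sum(dim=-2).log() + m.squeeze(-2)
    return eps * ((nu + 1e-08).log() - lse) + v_prev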
# kernel path: runs/run_shard_0/inductor_cache/f4/cf4oq7ktexj2lmonpx7i7du2a6loiff2fth25fxjgaqxbv46oaxj.py
# Topologically Sorted Source Nodes: [add_164, log_41, add_156, log_39, logsumexp_39, sub_39, mul_39, u_20, logsumexp_41, sub_41, mul_41, u_21], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
# Source node to ATen node mapping:
# add_156 => add_195
# add_164 => add_205
# log_39 => log_78
# log_41 => log_82
# logsumexp_39 => add_198, log_79
# logsumexp_41 => abs_42, add_208, amax_41, eq_41, exp_41, full_default_41, log_83, sub_82, sum_42, where_41
# mul_39 => mul_39
# mul_41 => mul_41
# sub_39 => sub_79
# sub_41 => sub_83
# u_20 => add_199
# u_21 => add_209
# Graph fragment:
# %add_205 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-08), kwargs = {})
# %log_82 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_205,), kwargs = {})
# %add_195 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-08), kwargs = {})
# %log_78 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_195,), kwargs = {})
# %log_79 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_40,), kwargs = {})
# %add_198 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log_79, %squeeze_39), kwargs = {})
# %sub_79 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_78, %add_198), kwargs = {})
# %mul_39 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_79, 0.001), kwargs = {})
# %add_199 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_39, %add_189), kwargs = {})
# %amax_41 : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%div_41, [-1], True), kwargs = {})
# %abs_42 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax_41,), kwargs = {})
# %eq_41 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_42, inf), kwargs = {})
# %full_default_41 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_41 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq_41, %full_default_41, %amax_41), kwargs = {})
# %sub_82 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_41, %where_41), kwargs = {})
# %exp_41 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_82,), kwargs = {})
# %sum_42 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_41, [-1]), kwargs = {})
# %log_83 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_42,), kwargs = {})
# %add_208 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log_83, %squeeze_41), kwargs = {})
# %sub_83 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_82, %add_208), kwargs = {})
# %mul_41 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_83, 0.001), kwargs = {})
# %add_209 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_41, %add_199), kwargs = {})
triton_poi_fused_add_log_logsumexp_mul_sub_18 = async_compile.triton('triton_poi_fused_add_log_logsumexp_mul_sub_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_log_logsumexp_mul_sub_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_log_logsumexp_mul_sub_18(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + (x0), xmask)
tmp34 = tl.load(in_ptr3 + (x0), xmask)
tmp41 = tl.load(in_ptr4 + (x0), xmask)
tmp1 = 1e-08
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp11 = tl_math.abs(tmp10)
tmp12 = float("inf")
tmp13 = tmp11 == tmp12
tmp14 = 0.0
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = tmp4 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp5 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp9 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tl_math.log(tmp26)
tmp28 = tmp27 + tmp15
tmp29 = tmp3 - tmp28
tmp30 = 0.001
tmp31 = tmp29 * tmp30
tmp33 = tl_math.log(tmp32)
tmp35 = tl_math.abs(tmp34)
tmp36 = tmp35 == tmp12
tmp37 = tl.where(tmp36, tmp14, tmp34)
tmp38 = tmp33 + tmp37
tmp39 = tmp3 - tmp38
tmp40 = tmp39 * tmp30
tmp42 = tmp40 + tmp41
tmp43 = tmp31 + tmp42
tl.store(in_out_ptr0 + (x0), tmp43, xmask)
''', device_str='cuda')
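
# Reader note (not generated): triton_poi_fused_add_log_logsumexp_mul_sub_18
# performs the same fused double u-update as ..._16 above; the bodies differ
# only in where the previous dual is read from. _16 loads it from the mutated
# in_out_ptr0, while _18 takes it from a separate in_ptr4, which lets the
# scheduler write the result into a freshly recycled buffer (buf101/buf102
# below) without aliasing the kernel's own input.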
# kernel path: runs/run_shard_0/inductor_cache/6w/c6wfdsewba6jlyvroxvinhibmxnx4egc43qldwd5a7y444nq7duw.py
# Topologically Sorted Source Nodes: [neg_200, add_800, add_801, truediv_200, exp, mul_200, cost], Original ATen: [aten.neg, aten.add, aten.div, aten.exp, aten.mul, aten.sum]
# Source node to ATen node mapping:
# add_800 => add_1000
# add_801 => add_1001
# cost => sum_201
# exp => exp_200
# mul_200 => mul_200
# neg_200 => neg_200
# truediv_200 => div_200
# Graph fragment:
# %neg_200 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg2_1,), kwargs = {})
# %add_1000 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_200, %unsqueeze_400), kwargs = {})
# %add_1001 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1000, %unsqueeze_401), kwargs = {})
# %div_200 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1001, 0.001), kwargs = {})
# %exp_200 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_200,), kwargs = {})
# %mul_200 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_200, %arg2_1), kwargs = {})
# %sum_201 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_200, [-2, -1]), kwargs = {})
triton_per_fused_add_div_exp_mul_neg_sum_19 = async_compile.triton('triton_per_fused_add_div_exp_mul_neg_sum_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_exp_mul_neg_sum_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_exp_mul_neg_sum_19(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r4 = rindex
x0 = xindex % 16
r3 = (rindex // 4)
x5 = xindex
r2 = rindex % 4
tmp0 = tl.load(in_ptr0 + (r4 + (16*x0)), xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tl.load(in_ptr1 + (r3 + (4*x5)), xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr2 + (r3 + (4*x5)), xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tl.load(in_ptr3 + (r3 + (4*x5)), xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr4 + (r3 + (4*x5)), xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr5 + (r2 + (4*x5)), xmask, eviction_policy='evict_last', other=0.0)
tmp1 = -tmp0
tmp3 = 1e-08
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp7 = tl_math.log(tmp6)
tmp9 = tl_math.abs(tmp8)
tmp10 = float("inf")
tmp11 = tmp9 == tmp10
tmp12 = 0.0
tmp13 = tl.where(tmp11, tmp12, tmp8)
tmp14 = tmp7 + tmp13
tmp15 = tmp5 - tmp14
tmp16 = 0.001
tmp17 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tmp20 = tmp1 + tmp19
tmp22 = tmp20 + tmp21
tmp23 = 1000.0
tmp24 = tmp22 * tmp23
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp25 * tmp0
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.where(xmask, tmp27, 0)
tmp30 = tl.sum(tmp29, 1)[:, None]
tl.store(out_ptr0 + (r4 + (16*x5)), tmp25, xmask)
tl.store(out_ptr1 + (x5), tmp30, xmask)
''', device_str='cuda')
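
# --- Illustrative reference only (not emitted by Inductor) ---
# A minimal sketch of what triton_per_fused_add_div_exp_mul_neg_sum_19 reduces
# to once the fused dual pieces are folded into u and v: recover the transport
# plan from the duals and contract it with the cost, returning both, as the
# kernel stores the plan to out_ptr0 and the reduced cost to out_ptr1. Which
# dual is unsqueezed along which axis (unsqueeze_400 vs unsqueeze_401) is an
# assumption; eps = 0.001 matches the 1000.0 reciprocal used in the kernel.
def _transport_cost_reference(cost, u, v, eps=0.001):
    plan = torch.exp((-cost + u.unsqueeze(-1) + v.unsqueeze(-2)) / eps)
    return plan, (plan * cost).sum(dim=(-2, -1))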
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
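    # Structure note (reader aid, not generated): the body below is a fully
    # unrolled Sinkhorn-style loop, read off the fusion comments. arg0_1 and
    # arg1_1 play the role of the two marginals, arg2_1 the cost tensor; each
    # iteration alternates a u update (kernels ..._16/_18) with a v update
    # (kernel ..._17) at eps = 0.001, recycling a handful of (4, 4, 4, 4[, 4])
    # scratch buffers throughout.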
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 1, 4), torch.float32)
# Topologically Sorted Source Nodes: [logsumexp], Original ATen: [aten.logsumexp]
stream0 = get_raw_stream(0)
triton_poi_fused_logsumexp_0.run(arg2_1, buf0, 1024, grid=grid(1024), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logsumexp], Original ATen: [aten.logsumexp]
triton_poi_fused_logsumexp_1.run(buf0, arg2_1, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [neg_1, add_5, add_6, truediv_1, logsumexp_1], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_2.run(arg2_1, arg1_1, buf1, buf2, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [neg_2, add_9, add_10, truediv_2], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_3.run(arg2_1, arg0_1, buf3, buf2, arg1_1, buf1, buf4, 1024, grid=grid(1024), stream=stream0)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_8, log_2, logsumexp_2, sub_2, mul_2], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [neg_3, add_13, add_14], Original ATen: [aten.neg, aten.add]
triton_poi_fused_add_neg_5.run(arg2_1, arg0_1, buf3, buf2, buf5, arg1_1, buf1, buf6, 1024, grid=grid(1024), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_12, log_3, truediv_3, logsumexp_3, sub_3], Original ATen: [aten.add, aten.log, aten.div, aten.logsumexp, aten.sub]
triton_poi_fused_add_div_log_logsumexp_sub_6.run(arg0_1, buf6, buf7, 256, grid=grid(256), stream=stream0)
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [neg_4, add_17, add_18], Original ATen: [aten.neg, aten.add]
triton_poi_fused_add_neg_7.run(arg2_1, buf7, arg0_1, buf3, buf2, buf5, arg1_1, buf1, buf8, 1024, grid=grid(1024), stream=stream0)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_16, log_4, logsumexp_4, sub_4], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub]
triton_poi_fused_add_log_logsumexp_sub_8.run(arg1_1, buf8, buf9, 256, grid=grid(256), stream=stream0)
buf10 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [neg_5, add_21, add_22], Original ATen: [aten.neg, aten.add]
triton_poi_fused_add_neg_9.run(arg2_1, buf7, arg0_1, buf3, buf2, buf9, buf5, arg1_1, buf1, buf10, 1024, grid=grid(1024), stream=stream0)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [add_20, log_5, add_4, log_1, u, logsumexp_1, sub_1, mul_1, u_1, mul_3, u_2, truediv_5, logsumexp_5, sub_5, mul_5, u_3], Original ATen: [aten.add, aten.log, aten.ones_like, aten.logsumexp, aten.sub, aten.mul, aten.div]
triton_poi_fused_add_div_log_logsumexp_mul_ones_like_sub_10.run(buf12, arg0_1, buf10, buf7, buf3, buf2, 256, grid=grid(256), stream=stream0)
del buf2
buf13 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [neg_6, add_25, add_26, truediv_6], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_11.run(arg2_1, buf12, buf9, buf5, arg1_1, buf1, buf13, 1024, grid=grid(1024), stream=stream0)
buf15 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [add, log, v, sub, mul, v_1, v_2, mul_4, v_3, add_24, log_6, logsumexp_6, sub_6, mul_6, v_4], Original ATen: [aten.add, aten.log, aten.ones_like, aten.sub, aten.mul, aten.logsumexp]
triton_poi_fused_add_log_logsumexp_mul_ones_like_sub_12.run(buf15, arg1_1, buf13, buf9, buf5, 256, grid=grid(256), stream=stream0)
buf16 = reinterpret_tensor(buf9, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf9 # reuse
buf17 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [neg_7, add_29, add_30, truediv_7, logsumexp_7], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf12, buf15, buf16, buf17, 256, grid=grid(256), stream=stream0)
buf18 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [neg_8, add_33, add_34, truediv_8], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf17, buf16, buf12, buf15, buf18, 1024, grid=grid(1024), stream=stream0)
buf19 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [add_32, log_8, logsumexp_8, sub_8, mul_8], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf18, buf19, 256, grid=grid(256), stream=stream0)
buf20 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [neg_9, add_37, add_38, truediv_9], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf17, buf16, buf12, buf19, buf15, buf20, 1024, grid=grid(1024), stream=stream0)
buf22 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [add_36, log_9, add_28, log_7, logsumexp_7, sub_7, mul_7, u_4, logsumexp_9, sub_9, mul_9, u_5], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf22, arg0_1, buf20, buf17, buf16, 256, grid=grid(256), stream=stream0)
buf24 = buf17; del buf17 # reuse
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [v_5, add_40, log_10, logsumexp_10, sub_10, mul_10, v_6], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf25, arg2_1, buf22, buf19, buf15, arg1_1, 256, grid=grid(256), stream=stream0)
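        # From buf26 onward the launch schedule is in steady state: each
        # repetition of kernels ..._13, ..._14, ..._4, ..._15, ..._16 and
        # ..._17 below is one more unrolled u/v iteration, differing only in
        # which scratch buffers are recycled.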
buf26 = reinterpret_tensor(buf19, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf19 # reuse
buf27 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [neg_11, add_45, add_46, truediv_11, logsumexp_11], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf22, buf25, buf26, buf27, 256, grid=grid(256), stream=stream0)
buf28 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [neg_12, add_49, add_50, truediv_12], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf27, buf26, buf22, buf25, buf28, 1024, grid=grid(1024), stream=stream0)
buf29 = reinterpret_tensor(buf16, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf16 # reuse
# Topologically Sorted Source Nodes: [add_48, log_12, logsumexp_12, sub_12, mul_12], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf28, buf29, 256, grid=grid(256), stream=stream0)
buf30 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [neg_13, add_53, add_54, truediv_13], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf27, buf26, buf22, buf29, buf25, buf30, 1024, grid=grid(1024), stream=stream0)
buf32 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [add_52, log_13, add_44, log_11, logsumexp_11, sub_11, mul_11, u_6, logsumexp_13, sub_13, mul_13, u_7], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf32, arg0_1, buf30, buf27, buf26, 256, grid=grid(256), stream=stream0)
buf34 = buf27; del buf27 # reuse
buf35 = buf34; del buf34 # reuse
# Topologically Sorted Source Nodes: [v_7, add_56, log_14, logsumexp_14, sub_14, mul_14, v_8], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf35, arg2_1, buf32, buf29, buf25, arg1_1, 256, grid=grid(256), stream=stream0)
buf36 = reinterpret_tensor(buf29, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf29 # reuse
buf37 = buf25; del buf25 # reuse
# Topologically Sorted Source Nodes: [neg_15, add_61, add_62, truediv_15, logsumexp_15], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf32, buf35, buf36, buf37, 256, grid=grid(256), stream=stream0)
buf38 = buf30; del buf30 # reuse
# Topologically Sorted Source Nodes: [neg_16, add_65, add_66, truediv_16], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf37, buf36, buf32, buf35, buf38, 1024, grid=grid(1024), stream=stream0)
buf39 = reinterpret_tensor(buf26, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf26 # reuse
# Topologically Sorted Source Nodes: [add_64, log_16, logsumexp_16, sub_16, mul_16], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf38, buf39, 256, grid=grid(256), stream=stream0)
buf40 = buf38; del buf38 # reuse
# Topologically Sorted Source Nodes: [neg_17, add_69, add_70, truediv_17], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf37, buf36, buf32, buf39, buf35, buf40, 1024, grid=grid(1024), stream=stream0)
buf42 = buf32; del buf32 # reuse
# Topologically Sorted Source Nodes: [add_68, log_17, add_60, log_15, logsumexp_15, sub_15, mul_15, u_8, logsumexp_17, sub_17, mul_17, u_9], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf42, arg0_1, buf40, buf37, buf36, 256, grid=grid(256), stream=stream0)
buf44 = buf37; del buf37 # reuse
buf45 = buf44; del buf44 # reuse
# Topologically Sorted Source Nodes: [v_9, add_72, log_18, logsumexp_18, sub_18, mul_18, v_10], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf45, arg2_1, buf42, buf39, buf35, arg1_1, 256, grid=grid(256), stream=stream0)
buf46 = reinterpret_tensor(buf39, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf39 # reuse
buf47 = buf35; del buf35 # reuse
# Topologically Sorted Source Nodes: [neg_19, add_77, add_78, truediv_19, logsumexp_19], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf42, buf45, buf46, buf47, 256, grid=grid(256), stream=stream0)
buf48 = buf40; del buf40 # reuse
# Topologically Sorted Source Nodes: [neg_20, add_81, add_82, truediv_20], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf47, buf46, buf42, buf45, buf48, 1024, grid=grid(1024), stream=stream0)
buf49 = reinterpret_tensor(buf36, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf36 # reuse
# Topologically Sorted Source Nodes: [add_80, log_20, logsumexp_20, sub_20, mul_20], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf48, buf49, 256, grid=grid(256), stream=stream0)
buf50 = buf48; del buf48 # reuse
# Topologically Sorted Source Nodes: [neg_21, add_85, add_86, truediv_21], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf47, buf46, buf42, buf49, buf45, buf50, 1024, grid=grid(1024), stream=stream0)
buf52 = buf42; del buf42 # reuse
# Topologically Sorted Source Nodes: [add_84, log_21, add_76, log_19, logsumexp_19, sub_19, mul_19, u_10, logsumexp_21, sub_21, mul_21, u_11], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf52, arg0_1, buf50, buf47, buf46, 256, grid=grid(256), stream=stream0)
buf54 = buf47; del buf47 # reuse
buf55 = buf54; del buf54 # reuse
# Topologically Sorted Source Nodes: [v_11, add_88, log_22, logsumexp_22, sub_22, mul_22, v_12], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf55, arg2_1, buf52, buf49, buf45, arg1_1, 256, grid=grid(256), stream=stream0)
buf56 = reinterpret_tensor(buf49, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf49 # reuse
buf57 = buf45; del buf45 # reuse
# Topologically Sorted Source Nodes: [neg_23, add_93, add_94, truediv_23, logsumexp_23], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf52, buf55, buf56, buf57, 256, grid=grid(256), stream=stream0)
buf58 = buf50; del buf50 # reuse
# Topologically Sorted Source Nodes: [neg_24, add_97, add_98, truediv_24], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf57, buf56, buf52, buf55, buf58, 1024, grid=grid(1024), stream=stream0)
buf59 = reinterpret_tensor(buf46, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf46 # reuse
# Topologically Sorted Source Nodes: [add_96, log_24, logsumexp_24, sub_24, mul_24], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf58, buf59, 256, grid=grid(256), stream=stream0)
buf60 = buf58; del buf58 # reuse
# Topologically Sorted Source Nodes: [neg_25, add_101, add_102, truediv_25], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf57, buf56, buf52, buf59, buf55, buf60, 1024, grid=grid(1024), stream=stream0)
buf62 = buf52; del buf52 # reuse
# Topologically Sorted Source Nodes: [add_100, log_25, add_92, log_23, logsumexp_23, sub_23, mul_23, u_12, logsumexp_25, sub_25, mul_25, u_13], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf62, arg0_1, buf60, buf57, buf56, 256, grid=grid(256), stream=stream0)
buf64 = buf57; del buf57 # reuse
buf65 = buf64; del buf64 # reuse
# Topologically Sorted Source Nodes: [v_13, add_104, log_26, logsumexp_26, sub_26, mul_26, v_14], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf65, arg2_1, buf62, buf59, buf55, arg1_1, 256, grid=grid(256), stream=stream0)
buf66 = reinterpret_tensor(buf59, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf59 # reuse
buf67 = buf55; del buf55 # reuse
# Topologically Sorted Source Nodes: [neg_27, add_109, add_110, truediv_27, logsumexp_27], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf62, buf65, buf66, buf67, 256, grid=grid(256), stream=stream0)
buf68 = buf60; del buf60 # reuse
# Topologically Sorted Source Nodes: [neg_28, add_113, add_114, truediv_28], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf67, buf66, buf62, buf65, buf68, 1024, grid=grid(1024), stream=stream0)
buf69 = reinterpret_tensor(buf56, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf56 # reuse
# Topologically Sorted Source Nodes: [add_112, log_28, logsumexp_28, sub_28, mul_28], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf68, buf69, 256, grid=grid(256), stream=stream0)
buf70 = buf68; del buf68 # reuse
# Topologically Sorted Source Nodes: [neg_29, add_117, add_118, truediv_29], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf67, buf66, buf62, buf69, buf65, buf70, 1024, grid=grid(1024), stream=stream0)
buf72 = buf62; del buf62 # reuse
# Topologically Sorted Source Nodes: [add_116, log_29, add_108, log_27, logsumexp_27, sub_27, mul_27, u_14, logsumexp_29, sub_29, mul_29, u_15], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf72, arg0_1, buf70, buf67, buf66, 256, grid=grid(256), stream=stream0)
buf74 = buf67; del buf67 # reuse
buf75 = buf74; del buf74 # reuse
# Topologically Sorted Source Nodes: [v_15, add_120, log_30, logsumexp_30, sub_30, mul_30, v_16], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf75, arg2_1, buf72, buf69, buf65, arg1_1, 256, grid=grid(256), stream=stream0)
buf76 = reinterpret_tensor(buf69, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf69 # reuse
buf77 = buf65; del buf65 # reuse
# Topologically Sorted Source Nodes: [neg_31, add_125, add_126, truediv_31, logsumexp_31], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf72, buf75, buf76, buf77, 256, grid=grid(256), stream=stream0)
buf78 = buf70; del buf70 # reuse
# Topologically Sorted Source Nodes: [neg_32, add_129, add_130, truediv_32], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf77, buf76, buf72, buf75, buf78, 1024, grid=grid(1024), stream=stream0)
buf79 = reinterpret_tensor(buf66, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf66 # reuse
# Topologically Sorted Source Nodes: [add_128, log_32, logsumexp_32, sub_32, mul_32], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf78, buf79, 256, grid=grid(256), stream=stream0)
buf80 = buf78; del buf78 # reuse
# Topologically Sorted Source Nodes: [neg_33, add_133, add_134, truediv_33], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf77, buf76, buf72, buf79, buf75, buf80, 1024, grid=grid(1024), stream=stream0)
buf82 = buf72; del buf72 # reuse
# Topologically Sorted Source Nodes: [add_132, log_33, add_124, log_31, logsumexp_31, sub_31, mul_31, u_16, logsumexp_33, sub_33, mul_33, u_17], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf82, arg0_1, buf80, buf77, buf76, 256, grid=grid(256), stream=stream0)
buf84 = buf77; del buf77 # reuse
buf85 = buf84; del buf84 # reuse
# Topologically Sorted Source Nodes: [v_17, add_136, log_34, logsumexp_34, sub_34, mul_34, v_18], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf85, arg2_1, buf82, buf79, buf75, arg1_1, 256, grid=grid(256), stream=stream0)
buf86 = reinterpret_tensor(buf79, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf79 # reuse
buf87 = buf75; del buf75 # reuse
# Topologically Sorted Source Nodes: [neg_35, add_141, add_142, truediv_35, logsumexp_35], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf82, buf85, buf86, buf87, 256, grid=grid(256), stream=stream0)
buf88 = buf80; del buf80 # reuse
# Topologically Sorted Source Nodes: [neg_36, add_145, add_146, truediv_36], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf87, buf86, buf82, buf85, buf88, 1024, grid=grid(1024), stream=stream0)
buf89 = reinterpret_tensor(buf76, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf76 # reuse
# Topologically Sorted Source Nodes: [add_144, log_36, logsumexp_36, sub_36, mul_36], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf88, buf89, 256, grid=grid(256), stream=stream0)
buf90 = buf88; del buf88 # reuse
# Topologically Sorted Source Nodes: [neg_37, add_149, add_150, truediv_37], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf87, buf86, buf82, buf89, buf85, buf90, 1024, grid=grid(1024), stream=stream0)
buf92 = buf82; del buf82 # reuse
# Topologically Sorted Source Nodes: [add_148, log_37, add_140, log_35, logsumexp_35, sub_35, mul_35, u_18, logsumexp_37, sub_37, mul_37, u_19], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf92, arg0_1, buf90, buf87, buf86, 256, grid=grid(256), stream=stream0)
buf94 = buf87; del buf87 # reuse
buf95 = buf94; del buf94 # reuse
# Topologically Sorted Source Nodes: [v_19, add_152, log_38, logsumexp_38, sub_38, mul_38, v_20], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf95, arg2_1, buf92, buf89, buf85, arg1_1, 256, grid=grid(256), stream=stream0)
buf96 = reinterpret_tensor(buf89, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf89 # reuse
buf97 = buf85; del buf85 # reuse
# Topologically Sorted Source Nodes: [neg_39, add_157, add_158, truediv_39, logsumexp_39], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf92, buf95, buf96, buf97, 256, grid=grid(256), stream=stream0)
buf98 = buf90; del buf90 # reuse
# Topologically Sorted Source Nodes: [neg_40, add_161, add_162, truediv_40], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf97, buf96, buf92, buf95, buf98, 1024, grid=grid(1024), stream=stream0)
buf99 = reinterpret_tensor(buf86, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf86 # reuse
# Topologically Sorted Source Nodes: [add_160, log_40, logsumexp_40, sub_40, mul_40], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf98, buf99, 256, grid=grid(256), stream=stream0)
buf100 = buf98; del buf98 # reuse
# Topologically Sorted Source Nodes: [neg_41, add_165, add_166, truediv_41], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf97, buf96, buf92, buf99, buf95, buf100, 1024, grid=grid(1024), stream=stream0)
buf101 = buf3; del buf3 # reuse
buf102 = buf101; del buf101 # reuse
# Topologically Sorted Source Nodes: [add_164, log_41, add_156, log_39, logsumexp_39, sub_39, mul_39, u_20, logsumexp_41, sub_41, mul_41, u_21], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_18.run(buf102, arg0_1, buf100, buf97, buf96, buf92, 256, grid=grid(256), stream=stream0)
del buf92
buf104 = buf97; del buf97 # reuse
buf105 = buf104; del buf104 # reuse
# Topologically Sorted Source Nodes: [v_21, add_168, log_42, logsumexp_42, sub_42, mul_42, v_22], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf105, arg2_1, buf102, buf99, buf95, arg1_1, 256, grid=grid(256), stream=stream0)
buf106 = reinterpret_tensor(buf99, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf99 # reuse
buf107 = buf95; del buf95 # reuse
# Topologically Sorted Source Nodes: [neg_43, add_173, add_174, truediv_43, logsumexp_43], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf102, buf105, buf106, buf107, 256, grid=grid(256), stream=stream0)
buf108 = buf100; del buf100 # reuse
# Topologically Sorted Source Nodes: [neg_44, add_177, add_178, truediv_44], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf107, buf106, buf102, buf105, buf108, 1024, grid=grid(1024), stream=stream0)
buf109 = reinterpret_tensor(buf96, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf96 # reuse
# Topologically Sorted Source Nodes: [add_176, log_44, logsumexp_44, sub_44, mul_44], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf108, buf109, 256, grid=grid(256), stream=stream0)
buf110 = buf108; del buf108 # reuse
# Topologically Sorted Source Nodes: [neg_45, add_181, add_182, truediv_45], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf107, buf106, buf102, buf109, buf105, buf110, 1024, grid=grid(1024), stream=stream0)
buf112 = buf102; del buf102 # reuse
# Topologically Sorted Source Nodes: [add_180, log_45, add_172, log_43, logsumexp_43, sub_43, mul_43, u_22, logsumexp_45, sub_45, mul_45, u_23], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf112, arg0_1, buf110, buf107, buf106, 256, grid=grid(256), stream=stream0)
buf114 = buf107; del buf107 # reuse
buf115 = buf114; del buf114 # reuse
# Topologically Sorted Source Nodes: [v_23, add_184, log_46, logsumexp_46, sub_46, mul_46, v_24], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf115, arg2_1, buf112, buf109, buf105, arg1_1, 256, grid=grid(256), stream=stream0)
buf116 = reinterpret_tensor(buf109, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf109 # reuse
buf117 = buf105; del buf105 # reuse
# Topologically Sorted Source Nodes: [neg_47, add_189, add_190, truediv_47, logsumexp_47], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf112, buf115, buf116, buf117, 256, grid=grid(256), stream=stream0)
buf118 = buf110; del buf110 # reuse
# Topologically Sorted Source Nodes: [neg_48, add_193, add_194, truediv_48], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf117, buf116, buf112, buf115, buf118, 1024, grid=grid(1024), stream=stream0)
buf119 = reinterpret_tensor(buf106, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf106 # reuse
# Topologically Sorted Source Nodes: [add_192, log_48, logsumexp_48, sub_48, mul_48], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf118, buf119, 256, grid=grid(256), stream=stream0)
buf120 = buf118; del buf118 # reuse
# Topologically Sorted Source Nodes: [neg_49, add_197, add_198, truediv_49], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf117, buf116, buf112, buf119, buf115, buf120, 1024, grid=grid(1024), stream=stream0)
buf122 = buf112; del buf112 # reuse
# Topologically Sorted Source Nodes: [add_196, log_49, add_188, log_47, logsumexp_47, sub_47, mul_47, u_24, logsumexp_49, sub_49, mul_49, u_25], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf122, arg0_1, buf120, buf117, buf116, 256, grid=grid(256), stream=stream0)
buf124 = buf117; del buf117 # reuse
buf125 = buf124; del buf124 # reuse
# Topologically Sorted Source Nodes: [v_25, add_200, log_50, logsumexp_50, sub_50, mul_50, v_26], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf125, arg2_1, buf122, buf119, buf115, arg1_1, 256, grid=grid(256), stream=stream0)
buf126 = reinterpret_tensor(buf119, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf119 # reuse
buf127 = buf115; del buf115 # reuse
# Topologically Sorted Source Nodes: [neg_51, add_205, add_206, truediv_51, logsumexp_51], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf122, buf125, buf126, buf127, 256, grid=grid(256), stream=stream0)
buf128 = buf120; del buf120 # reuse
# Topologically Sorted Source Nodes: [neg_52, add_209, add_210, truediv_52], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf127, buf126, buf122, buf125, buf128, 1024, grid=grid(1024), stream=stream0)
buf129 = reinterpret_tensor(buf116, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf116 # reuse
# Topologically Sorted Source Nodes: [add_208, log_52, logsumexp_52, sub_52, mul_52], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf128, buf129, 256, grid=grid(256), stream=stream0)
buf130 = buf128; del buf128 # reuse
# Topologically Sorted Source Nodes: [neg_53, add_213, add_214, truediv_53], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf127, buf126, buf122, buf129, buf125, buf130, 1024, grid=grid(1024), stream=stream0)
buf132 = buf122; del buf122 # reuse
# Topologically Sorted Source Nodes: [add_212, log_53, add_204, log_51, logsumexp_51, sub_51, mul_51, u_26, logsumexp_53, sub_53, mul_53, u_27], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf132, arg0_1, buf130, buf127, buf126, 256, grid=grid(256), stream=stream0)
buf134 = buf127; del buf127 # reuse
buf135 = buf134; del buf134 # reuse
# Topologically Sorted Source Nodes: [v_27, add_216, log_54, logsumexp_54, sub_54, mul_54, v_28], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf135, arg2_1, buf132, buf129, buf125, arg1_1, 256, grid=grid(256), stream=stream0)
buf136 = reinterpret_tensor(buf129, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf129 # reuse
buf137 = buf125; del buf125 # reuse
# Topologically Sorted Source Nodes: [neg_55, add_221, add_222, truediv_55, logsumexp_55], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf132, buf135, buf136, buf137, 256, grid=grid(256), stream=stream0)
buf138 = buf130; del buf130 # reuse
# Topologically Sorted Source Nodes: [neg_56, add_225, add_226, truediv_56], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf137, buf136, buf132, buf135, buf138, 1024, grid=grid(1024), stream=stream0)
buf139 = reinterpret_tensor(buf126, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf126 # reuse
# Topologically Sorted Source Nodes: [add_224, log_56, logsumexp_56, sub_56, mul_56], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf138, buf139, 256, grid=grid(256), stream=stream0)
buf140 = buf138; del buf138 # reuse
# Topologically Sorted Source Nodes: [neg_57, add_229, add_230, truediv_57], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf137, buf136, buf132, buf139, buf135, buf140, 1024, grid=grid(1024), stream=stream0)
buf142 = buf132; del buf132 # reuse
# Topologically Sorted Source Nodes: [add_228, log_57, add_220, log_55, logsumexp_55, sub_55, mul_55, u_28, logsumexp_57, sub_57, mul_57, u_29], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf142, arg0_1, buf140, buf137, buf136, 256, grid=grid(256), stream=stream0)
buf144 = buf137; del buf137 # reuse
buf145 = buf144; del buf144 # reuse
# Topologically Sorted Source Nodes: [v_29, add_232, log_58, logsumexp_58, sub_58, mul_58, v_30], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf145, arg2_1, buf142, buf139, buf135, arg1_1, 256, grid=grid(256), stream=stream0)
buf146 = reinterpret_tensor(buf139, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf139 # reuse
buf147 = buf135; del buf135 # reuse
# Topologically Sorted Source Nodes: [neg_59, add_237, add_238, truediv_59, logsumexp_59], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf142, buf145, buf146, buf147, 256, grid=grid(256), stream=stream0)
buf148 = buf140; del buf140 # reuse
# Topologically Sorted Source Nodes: [neg_60, add_241, add_242, truediv_60], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf147, buf146, buf142, buf145, buf148, 1024, grid=grid(1024), stream=stream0)
buf149 = reinterpret_tensor(buf136, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf136 # reuse
# Topologically Sorted Source Nodes: [add_240, log_60, logsumexp_60, sub_60, mul_60], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf148, buf149, 256, grid=grid(256), stream=stream0)
buf150 = buf148; del buf148 # reuse
# Topologically Sorted Source Nodes: [neg_61, add_245, add_246, truediv_61], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf147, buf146, buf142, buf149, buf145, buf150, 1024, grid=grid(1024), stream=stream0)
buf152 = buf142; del buf142 # reuse
# Topologically Sorted Source Nodes: [add_244, log_61, add_236, log_59, logsumexp_59, sub_59, mul_59, u_30, logsumexp_61, sub_61, mul_61, u_31], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf152, arg0_1, buf150, buf147, buf146, 256, grid=grid(256), stream=stream0)
buf154 = buf147; del buf147 # reuse
buf155 = buf154; del buf154 # reuse
# Topologically Sorted Source Nodes: [v_31, add_248, log_62, logsumexp_62, sub_62, mul_62, v_32], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf155, arg2_1, buf152, buf149, buf145, arg1_1, 256, grid=grid(256), stream=stream0)
buf156 = reinterpret_tensor(buf149, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf149 # reuse
buf157 = buf145; del buf145 # reuse
# Topologically Sorted Source Nodes: [neg_63, add_253, add_254, truediv_63, logsumexp_63], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf152, buf155, buf156, buf157, 256, grid=grid(256), stream=stream0)
buf158 = buf150; del buf150 # reuse
# Topologically Sorted Source Nodes: [neg_64, add_257, add_258, truediv_64], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf157, buf156, buf152, buf155, buf158, 1024, grid=grid(1024), stream=stream0)
buf159 = reinterpret_tensor(buf146, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf146 # reuse
# Topologically Sorted Source Nodes: [add_256, log_64, logsumexp_64, sub_64, mul_64], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf158, buf159, 256, grid=grid(256), stream=stream0)
buf160 = buf158; del buf158 # reuse
# Topologically Sorted Source Nodes: [neg_65, add_261, add_262, truediv_65], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf157, buf156, buf152, buf159, buf155, buf160, 1024, grid=grid(1024), stream=stream0)
buf162 = buf152; del buf152 # reuse
# Topologically Sorted Source Nodes: [add_260, log_65, add_252, log_63, logsumexp_63, sub_63, mul_63, u_32, logsumexp_65, sub_65, mul_65, u_33], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf162, arg0_1, buf160, buf157, buf156, 256, grid=grid(256), stream=stream0)
buf164 = buf157; del buf157 # reuse
buf165 = buf164; del buf164 # reuse
# Topologically Sorted Source Nodes: [v_33, add_264, log_66, logsumexp_66, sub_66, mul_66, v_34], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf165, arg2_1, buf162, buf159, buf155, arg1_1, 256, grid=grid(256), stream=stream0)
buf166 = reinterpret_tensor(buf159, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf159 # reuse
buf167 = buf155; del buf155 # reuse
# Topologically Sorted Source Nodes: [neg_67, add_269, add_270, truediv_67, logsumexp_67], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf162, buf165, buf166, buf167, 256, grid=grid(256), stream=stream0)
buf168 = buf160; del buf160 # reuse
# Topologically Sorted Source Nodes: [neg_68, add_273, add_274, truediv_68], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf167, buf166, buf162, buf165, buf168, 1024, grid=grid(1024), stream=stream0)
buf169 = reinterpret_tensor(buf156, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf156 # reuse
# Topologically Sorted Source Nodes: [add_272, log_68, logsumexp_68, sub_68, mul_68], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf168, buf169, 256, grid=grid(256), stream=stream0)
buf170 = buf168; del buf168 # reuse
# Topologically Sorted Source Nodes: [neg_69, add_277, add_278, truediv_69], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf167, buf166, buf162, buf169, buf165, buf170, 1024, grid=grid(1024), stream=stream0)
buf172 = buf162; del buf162 # reuse
# Topologically Sorted Source Nodes: [add_276, log_69, add_268, log_67, logsumexp_67, sub_67, mul_67, u_34, logsumexp_69, sub_69, mul_69, u_35], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf172, arg0_1, buf170, buf167, buf166, 256, grid=grid(256), stream=stream0)
buf174 = buf167; del buf167 # reuse
buf175 = buf174; del buf174 # reuse
# Topologically Sorted Source Nodes: [v_35, add_280, log_70, logsumexp_70, sub_70, mul_70, v_36], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf175, arg2_1, buf172, buf169, buf165, arg1_1, 256, grid=grid(256), stream=stream0)
buf176 = reinterpret_tensor(buf169, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf169 # reuse
buf177 = buf165; del buf165 # reuse
# Topologically Sorted Source Nodes: [neg_71, add_285, add_286, truediv_71, logsumexp_71], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf172, buf175, buf176, buf177, 256, grid=grid(256), stream=stream0)
buf178 = buf170; del buf170 # reuse
# Topologically Sorted Source Nodes: [neg_72, add_289, add_290, truediv_72], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf177, buf176, buf172, buf175, buf178, 1024, grid=grid(1024), stream=stream0)
buf179 = reinterpret_tensor(buf166, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf166 # reuse
# Topologically Sorted Source Nodes: [add_288, log_72, logsumexp_72, sub_72, mul_72], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf178, buf179, 256, grid=grid(256), stream=stream0)
buf180 = buf178; del buf178 # reuse
# Topologically Sorted Source Nodes: [neg_73, add_293, add_294, truediv_73], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf177, buf176, buf172, buf179, buf175, buf180, 1024, grid=grid(1024), stream=stream0)
buf182 = buf172; del buf172 # reuse
# Topologically Sorted Source Nodes: [add_292, log_73, add_284, log_71, logsumexp_71, sub_71, mul_71, u_36, logsumexp_73, sub_73, mul_73, u_37], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf182, arg0_1, buf180, buf177, buf176, 256, grid=grid(256), stream=stream0)
buf184 = buf177; del buf177 # reuse
buf185 = buf184; del buf184 # reuse
# Topologically Sorted Source Nodes: [v_37, add_296, log_74, logsumexp_74, sub_74, mul_74, v_38], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf185, arg2_1, buf182, buf179, buf175, arg1_1, 256, grid=grid(256), stream=stream0)
buf186 = reinterpret_tensor(buf179, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf179 # reuse
buf187 = buf175; del buf175 # reuse
# Topologically Sorted Source Nodes: [neg_75, add_301, add_302, truediv_75, logsumexp_75], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf182, buf185, buf186, buf187, 256, grid=grid(256), stream=stream0)
buf188 = buf180; del buf180 # reuse
# Topologically Sorted Source Nodes: [neg_76, add_305, add_306, truediv_76], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf187, buf186, buf182, buf185, buf188, 1024, grid=grid(1024), stream=stream0)
buf189 = reinterpret_tensor(buf176, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf176 # reuse
# Topologically Sorted Source Nodes: [add_304, log_76, logsumexp_76, sub_76, mul_76], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf188, buf189, 256, grid=grid(256), stream=stream0)
buf190 = buf188; del buf188 # reuse
# Topologically Sorted Source Nodes: [neg_77, add_309, add_310, truediv_77], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf187, buf186, buf182, buf189, buf185, buf190, 1024, grid=grid(1024), stream=stream0)
buf192 = buf182; del buf182 # reuse
# Topologically Sorted Source Nodes: [add_308, log_77, add_300, log_75, logsumexp_75, sub_75, mul_75, u_38, logsumexp_77, sub_77, mul_77, u_39], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf192, arg0_1, buf190, buf187, buf186, 256, grid=grid(256), stream=stream0)
buf194 = buf187; del buf187 # reuse
buf195 = buf194; del buf194 # reuse
# Topologically Sorted Source Nodes: [v_39, add_312, log_78, logsumexp_78, sub_78, mul_78, v_40], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf195, arg2_1, buf192, buf189, buf185, arg1_1, 256, grid=grid(256), stream=stream0)
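# Input roles (inferred from the call sites; the generated file does not
# name its arguments): arg2_1 feeds every kernel that forms the pairwise
# term (the _13, _14, _15, and _17 launches), which is the cost-like
# position; arg1_1 appears only in the v-side update kernels (mul_sub_4 and
# mul_sub_17) and arg0_1 only in the u-side ones (add_div_neg_14/_15 and
# mul_sub_16), the positions a pair of marginal-like inputs would occupy.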
buf196 = reinterpret_tensor(buf189, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf189 # reuse
buf197 = buf185; del buf185 # reuse
# Topologically Sorted Source Nodes: [neg_79, add_317, add_318, truediv_79, logsumexp_79], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf192, buf195, buf196, buf197, 256, grid=grid(256), stream=stream0)
buf198 = buf190; del buf190 # reuse
# Topologically Sorted Source Nodes: [neg_80, add_321, add_322, truediv_80], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf197, buf196, buf192, buf195, buf198, 1024, grid=grid(1024), stream=stream0)
buf199 = reinterpret_tensor(buf186, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf186 # reuse
# Topologically Sorted Source Nodes: [add_320, log_80, logsumexp_80, sub_80, mul_80], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf198, buf199, 256, grid=grid(256), stream=stream0)
buf200 = buf198; del buf198 # reuse
# Topologically Sorted Source Nodes: [neg_81, add_325, add_326, truediv_81], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf197, buf196, buf192, buf199, buf195, buf200, 1024, grid=grid(1024), stream=stream0)
buf202 = buf192; del buf192 # reuse
# Topologically Sorted Source Nodes: [add_324, log_81, add_316, log_79, logsumexp_79, sub_79, mul_79, u_40, logsumexp_81, sub_81, mul_81, u_41], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf202, arg0_1, buf200, buf197, buf196, 256, grid=grid(256), stream=stream0)
buf204 = buf197; del buf197 # reuse
buf205 = buf204; del buf204 # reuse
# Topologically Sorted Source Nodes: [v_41, add_328, log_82, logsumexp_82, sub_82, mul_82, v_42], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf205, arg2_1, buf202, buf199, buf195, arg1_1, 256, grid=grid(256), stream=stream0)
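# Buffer-management note: the reinterpret_tensor calls re-view a spent
# (4, 4, 4, 4) buffer as (4, 4, 4, 4, 1) so it can hold what appears to be
# the keepdim log-sum-exp intermediate, and every `del buf...; # reuse`
# pair recycles a prior allocation. The net effect is that this entire
# unrolled sequence cycles through a handful of 256- and 1024-element
# workspaces instead of allocating fresh buffers per iteration.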
buf206 = reinterpret_tensor(buf199, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf199 # reuse
buf207 = buf195; del buf195 # reuse
# Topologically Sorted Source Nodes: [neg_83, add_333, add_334, truediv_83, logsumexp_83], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf202, buf205, buf206, buf207, 256, grid=grid(256), stream=stream0)
buf208 = buf200; del buf200 # reuse
# Topologically Sorted Source Nodes: [neg_84, add_337, add_338, truediv_84], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf207, buf206, buf202, buf205, buf208, 1024, grid=grid(1024), stream=stream0)
buf209 = reinterpret_tensor(buf196, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf196 # reuse
# Topologically Sorted Source Nodes: [add_336, log_84, logsumexp_84, sub_84, mul_84], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf208, buf209, 256, grid=grid(256), stream=stream0)
buf210 = buf208; del buf208 # reuse
# Topologically Sorted Source Nodes: [neg_85, add_341, add_342, truediv_85], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf207, buf206, buf202, buf209, buf205, buf210, 1024, grid=grid(1024), stream=stream0)
buf212 = buf202; del buf202 # reuse
# Topologically Sorted Source Nodes: [add_340, log_85, add_332, log_83, logsumexp_83, sub_83, mul_83, u_42, logsumexp_85, sub_85, mul_85, u_43], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf212, arg0_1, buf210, buf207, buf206, 256, grid=grid(256), stream=stream0)
buf214 = buf207; del buf207 # reuse
buf215 = buf214; del buf214 # reuse
# Topologically Sorted Source Nodes: [v_43, add_344, log_86, logsumexp_86, sub_86, mul_86, v_44], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf215, arg2_1, buf212, buf209, buf205, arg1_1, 256, grid=grid(256), stream=stream0)
buf216 = reinterpret_tensor(buf209, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf209 # reuse
buf217 = buf205; del buf205 # reuse
# Topologically Sorted Source Nodes: [neg_87, add_349, add_350, truediv_87, logsumexp_87], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf212, buf215, buf216, buf217, 256, grid=grid(256), stream=stream0)
buf218 = buf210; del buf210 # reuse
# Topologically Sorted Source Nodes: [neg_88, add_353, add_354, truediv_88], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf217, buf216, buf212, buf215, buf218, 1024, grid=grid(1024), stream=stream0)
buf219 = reinterpret_tensor(buf206, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf206 # reuse
# Topologically Sorted Source Nodes: [add_352, log_88, logsumexp_88, sub_88, mul_88], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf218, buf219, 256, grid=grid(256), stream=stream0)
buf220 = buf218; del buf218 # reuse
# Topologically Sorted Source Nodes: [neg_89, add_357, add_358, truediv_89], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf217, buf216, buf212, buf219, buf215, buf220, 1024, grid=grid(1024), stream=stream0)
buf222 = buf212; del buf212 # reuse
# Topologically Sorted Source Nodes: [add_356, log_89, add_348, log_87, logsumexp_87, sub_87, mul_87, u_44, logsumexp_89, sub_89, mul_89, u_45], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf222, arg0_1, buf220, buf217, buf216, 256, grid=grid(256), stream=stream0)
buf224 = buf217; del buf217 # reuse
buf225 = buf224; del buf224 # reuse
# Topologically Sorted Source Nodes: [v_45, add_360, log_90, logsumexp_90, sub_90, mul_90, v_46], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf225, arg2_1, buf222, buf219, buf215, arg1_1, 256, grid=grid(256), stream=stream0)
buf226 = reinterpret_tensor(buf219, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf219 # reuse
buf227 = buf215; del buf215 # reuse
# Topologically Sorted Source Nodes: [neg_91, add_365, add_366, truediv_91, logsumexp_91], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf222, buf225, buf226, buf227, 256, grid=grid(256), stream=stream0)
buf228 = buf220; del buf220 # reuse
# Topologically Sorted Source Nodes: [neg_92, add_369, add_370, truediv_92], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf227, buf226, buf222, buf225, buf228, 1024, grid=grid(1024), stream=stream0)
buf229 = reinterpret_tensor(buf216, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf216 # reuse
# Topologically Sorted Source Nodes: [add_368, log_92, logsumexp_92, sub_92, mul_92], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf228, buf229, 256, grid=grid(256), stream=stream0)
buf230 = buf228; del buf228 # reuse
# Topologically Sorted Source Nodes: [neg_93, add_373, add_374, truediv_93], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf227, buf226, buf222, buf229, buf225, buf230, 1024, grid=grid(1024), stream=stream0)
buf232 = buf222; del buf222 # reuse
# Topologically Sorted Source Nodes: [add_372, log_93, add_364, log_91, logsumexp_91, sub_91, mul_91, u_46, logsumexp_93, sub_93, mul_93, u_47], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf232, arg0_1, buf230, buf227, buf226, 256, grid=grid(256), stream=stream0)
buf234 = buf227; del buf227 # reuse
buf235 = buf234; del buf234 # reuse
# Topologically Sorted Source Nodes: [v_47, add_376, log_94, logsumexp_94, sub_94, mul_94, v_48], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf235, arg2_1, buf232, buf229, buf225, arg1_1, 256, grid=grid(256), stream=stream0)
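# Grid-size note: the 256-element launches are elementwise over the
# 4*4*4*4 = 256 outputs, while the fused neg/add/div kernels launch over
# 1024 elements, i.e. 256 x 4, consistent with materializing the pairwise
# term across one extra size-4 dimension before the following logsumexp
# kernel reduces it back to 256. (Inferred from the grid arguments, not
# from any annotation in the generated file.)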
buf236 = reinterpret_tensor(buf229, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf229 # reuse
buf237 = buf225; del buf225 # reuse
# Topologically Sorted Source Nodes: [neg_95, add_381, add_382, truediv_95, logsumexp_95], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf232, buf235, buf236, buf237, 256, grid=grid(256), stream=stream0)
buf238 = buf230; del buf230 # reuse
# Topologically Sorted Source Nodes: [neg_96, add_385, add_386, truediv_96], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf237, buf236, buf232, buf235, buf238, 1024, grid=grid(1024), stream=stream0)
buf239 = reinterpret_tensor(buf226, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf226 # reuse
# Topologically Sorted Source Nodes: [add_384, log_96, logsumexp_96, sub_96, mul_96], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf238, buf239, 256, grid=grid(256), stream=stream0)
buf240 = buf238; del buf238 # reuse
# Topologically Sorted Source Nodes: [neg_97, add_389, add_390, truediv_97], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf237, buf236, buf232, buf239, buf235, buf240, 1024, grid=grid(1024), stream=stream0)
buf242 = buf232; del buf232 # reuse
# Topologically Sorted Source Nodes: [add_388, log_97, add_380, log_95, logsumexp_95, sub_95, mul_95, u_48, logsumexp_97, sub_97, mul_97, u_49], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf242, arg0_1, buf240, buf237, buf236, 256, grid=grid(256), stream=stream0)
buf244 = buf237; del buf237 # reuse
buf245 = buf244; del buf244 # reuse
# Topologically Sorted Source Nodes: [v_49, add_392, log_98, logsumexp_98, sub_98, mul_98, v_50], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf245, arg2_1, buf242, buf239, buf235, arg1_1, 256, grid=grid(256), stream=stream0)
buf246 = reinterpret_tensor(buf239, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf239 # reuse
buf247 = buf235; del buf235 # reuse
# Topologically Sorted Source Nodes: [neg_99, add_397, add_398, truediv_99, logsumexp_99], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf242, buf245, buf246, buf247, 256, grid=grid(256), stream=stream0)
buf248 = buf240; del buf240 # reuse
# Topologically Sorted Source Nodes: [neg_100, add_401, add_402, truediv_100], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf247, buf246, buf242, buf245, buf248, 1024, grid=grid(1024), stream=stream0)
buf249 = reinterpret_tensor(buf236, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf236 # reuse
# Topologically Sorted Source Nodes: [add_400, log_100, logsumexp_100, sub_100, mul_100], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf248, buf249, 256, grid=grid(256), stream=stream0)
buf250 = buf248; del buf248 # reuse
# Topologically Sorted Source Nodes: [neg_101, add_405, add_406, truediv_101], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf247, buf246, buf242, buf249, buf245, buf250, 1024, grid=grid(1024), stream=stream0)
buf252 = buf242; del buf242 # reuse
# Topologically Sorted Source Nodes: [add_404, log_101, add_396, log_99, logsumexp_99, sub_99, mul_99, u_50, logsumexp_101, sub_101, mul_101, u_51], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf252, arg0_1, buf250, buf247, buf246, 256, grid=grid(256), stream=stream0)
buf254 = buf247; del buf247 # reuse
buf255 = buf254; del buf254 # reuse
# Topologically Sorted Source Nodes: [v_51, add_408, log_102, logsumexp_102, sub_102, mul_102, v_52], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf255, arg2_1, buf252, buf249, buf245, arg1_1, 256, grid=grid(256), stream=stream0)
buf256 = reinterpret_tensor(buf249, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf249 # reuse
buf257 = buf245; del buf245 # reuse
# Topologically Sorted Source Nodes: [neg_103, add_413, add_414, truediv_103, logsumexp_103], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf252, buf255, buf256, buf257, 256, grid=grid(256), stream=stream0)
buf258 = buf250; del buf250 # reuse
# Topologically Sorted Source Nodes: [neg_104, add_417, add_418, truediv_104], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf257, buf256, buf252, buf255, buf258, 1024, grid=grid(1024), stream=stream0)
buf259 = reinterpret_tensor(buf246, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf246 # reuse
# Topologically Sorted Source Nodes: [add_416, log_104, logsumexp_104, sub_104, mul_104], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf258, buf259, 256, grid=grid(256), stream=stream0)
buf260 = buf258; del buf258 # reuse
# Topologically Sorted Source Nodes: [neg_105, add_421, add_422, truediv_105], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf257, buf256, buf252, buf259, buf255, buf260, 1024, grid=grid(1024), stream=stream0)
buf262 = buf252; del buf252 # reuse
# Topologically Sorted Source Nodes: [add_420, log_105, add_412, log_103, logsumexp_103, sub_103, mul_103, u_52, logsumexp_105, sub_105, mul_105, u_53], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf262, arg0_1, buf260, buf257, buf256, 256, grid=grid(256), stream=stream0)
buf264 = buf257; del buf257 # reuse
buf265 = buf264; del buf264 # reuse
# Topologically Sorted Source Nodes: [v_53, add_424, log_106, logsumexp_106, sub_106, mul_106, v_54], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf265, arg2_1, buf262, buf259, buf255, arg1_1, 256, grid=grid(256), stream=stream0)
buf266 = reinterpret_tensor(buf259, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf259 # reuse
buf267 = buf255; del buf255 # reuse
# Topologically Sorted Source Nodes: [neg_107, add_429, add_430, truediv_107, logsumexp_107], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf262, buf265, buf266, buf267, 256, grid=grid(256), stream=stream0)
buf268 = buf260; del buf260 # reuse
# Topologically Sorted Source Nodes: [neg_108, add_433, add_434, truediv_108], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf267, buf266, buf262, buf265, buf268, 1024, grid=grid(1024), stream=stream0)
buf269 = reinterpret_tensor(buf256, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf256 # reuse
# Topologically Sorted Source Nodes: [add_432, log_108, logsumexp_108, sub_108, mul_108], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf268, buf269, 256, grid=grid(256), stream=stream0)
buf270 = buf268; del buf268 # reuse
# Topologically Sorted Source Nodes: [neg_109, add_437, add_438, truediv_109], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf267, buf266, buf262, buf269, buf265, buf270, 1024, grid=grid(1024), stream=stream0)
buf272 = buf262; del buf262 # reuse
# Topologically Sorted Source Nodes: [add_436, log_109, add_428, log_107, logsumexp_107, sub_107, mul_107, u_54, logsumexp_109, sub_109, mul_109, u_55], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf272, arg0_1, buf270, buf267, buf266, 256, grid=grid(256), stream=stream0)
buf274 = buf267; del buf267 # reuse
buf275 = buf274; del buf274 # reuse
# Topologically Sorted Source Nodes: [v_55, add_440, log_110, logsumexp_110, sub_110, mul_110, v_56], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf275, arg2_1, buf272, buf269, buf265, arg1_1, 256, grid=grid(256), stream=stream0)
buf276 = reinterpret_tensor(buf269, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf269 # reuse
buf277 = buf265; del buf265 # reuse
# Topologically Sorted Source Nodes: [neg_111, add_445, add_446, truediv_111, logsumexp_111], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf272, buf275, buf276, buf277, 256, grid=grid(256), stream=stream0)
buf278 = buf270; del buf270 # reuse
# Topologically Sorted Source Nodes: [neg_112, add_449, add_450, truediv_112], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf277, buf276, buf272, buf275, buf278, 1024, grid=grid(1024), stream=stream0)
buf279 = reinterpret_tensor(buf266, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf266 # reuse
# Topologically Sorted Source Nodes: [add_448, log_112, logsumexp_112, sub_112, mul_112], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf278, buf279, 256, grid=grid(256), stream=stream0)
buf280 = buf278; del buf278 # reuse
# Topologically Sorted Source Nodes: [neg_113, add_453, add_454, truediv_113], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf277, buf276, buf272, buf279, buf275, buf280, 1024, grid=grid(1024), stream=stream0)
buf282 = buf272; del buf272 # reuse
# Topologically Sorted Source Nodes: [add_452, log_113, add_444, log_111, logsumexp_111, sub_111, mul_111, u_56, logsumexp_113, sub_113, mul_113, u_57], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf282, arg0_1, buf280, buf277, buf276, 256, grid=grid(256), stream=stream0)
buf284 = buf277; del buf277 # reuse
buf285 = buf284; del buf284 # reuse
# Topologically Sorted Source Nodes: [v_57, add_456, log_114, logsumexp_114, sub_114, mul_114, v_58], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf285, arg2_1, buf282, buf279, buf275, arg1_1, 256, grid=grid(256), stream=stream0)
buf286 = reinterpret_tensor(buf279, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf279 # reuse
buf287 = buf275; del buf275 # reuse
# Topologically Sorted Source Nodes: [neg_115, add_461, add_462, truediv_115, logsumexp_115], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf282, buf285, buf286, buf287, 256, grid=grid(256), stream=stream0)
buf288 = buf280; del buf280 # reuse
# Topologically Sorted Source Nodes: [neg_116, add_465, add_466, truediv_116], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf287, buf286, buf282, buf285, buf288, 1024, grid=grid(1024), stream=stream0)
buf289 = reinterpret_tensor(buf276, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf276 # reuse
# Topologically Sorted Source Nodes: [add_464, log_116, logsumexp_116, sub_116, mul_116], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf288, buf289, 256, grid=grid(256), stream=stream0)
buf290 = buf288; del buf288 # reuse
# Topologically Sorted Source Nodes: [neg_117, add_469, add_470, truediv_117], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf287, buf286, buf282, buf289, buf285, buf290, 1024, grid=grid(1024), stream=stream0)
buf292 = buf282; del buf282 # reuse
# Topologically Sorted Source Nodes: [add_468, log_117, add_460, log_115, logsumexp_115, sub_115, mul_115, u_58, logsumexp_117, sub_117, mul_117, u_59], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf292, arg0_1, buf290, buf287, buf286, 256, grid=grid(256), stream=stream0)
buf294 = buf287; del buf287 # reuse
buf295 = buf294; del buf294 # reuse
# Topologically Sorted Source Nodes: [v_59, add_472, log_118, logsumexp_118, sub_118, mul_118, v_60], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf295, arg2_1, buf292, buf289, buf285, arg1_1, 256, grid=grid(256), stream=stream0)
buf296 = reinterpret_tensor(buf289, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf289 # reuse
buf297 = buf285; del buf285 # reuse
# Topologically Sorted Source Nodes: [neg_119, add_477, add_478, truediv_119, logsumexp_119], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf292, buf295, buf296, buf297, 256, grid=grid(256), stream=stream0)
buf298 = buf290; del buf290 # reuse
# Topologically Sorted Source Nodes: [neg_120, add_481, add_482, truediv_120], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf297, buf296, buf292, buf295, buf298, 1024, grid=grid(1024), stream=stream0)
buf299 = reinterpret_tensor(buf286, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf286 # reuse
# Topologically Sorted Source Nodes: [add_480, log_120, logsumexp_120, sub_120, mul_120], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf298, buf299, 256, grid=grid(256), stream=stream0)
buf300 = buf298; del buf298 # reuse
# Topologically Sorted Source Nodes: [neg_121, add_485, add_486, truediv_121], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf297, buf296, buf292, buf299, buf295, buf300, 1024, grid=grid(1024), stream=stream0)
buf302 = buf292; del buf292 # reuse
# Topologically Sorted Source Nodes: [add_484, log_121, add_476, log_119, logsumexp_119, sub_119, mul_119, u_60, logsumexp_121, sub_121, mul_121, u_61], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf302, arg0_1, buf300, buf297, buf296, 256, grid=grid(256), stream=stream0)
buf304 = buf297; del buf297 # reuse
buf305 = buf304; del buf304 # reuse
# Topologically Sorted Source Nodes: [v_61, add_488, log_122, logsumexp_122, sub_122, mul_122, v_62], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf305, arg2_1, buf302, buf299, buf295, arg1_1, 256, grid=grid(256), stream=stream0)
buf306 = reinterpret_tensor(buf299, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf299 # reuse
buf307 = buf295; del buf295 # reuse
# Topologically Sorted Source Nodes: [neg_123, add_493, add_494, truediv_123, logsumexp_123], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf302, buf305, buf306, buf307, 256, grid=grid(256), stream=stream0)
buf308 = buf300; del buf300 # reuse
# Topologically Sorted Source Nodes: [neg_124, add_497, add_498, truediv_124], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf307, buf306, buf302, buf305, buf308, 1024, grid=grid(1024), stream=stream0)
buf309 = reinterpret_tensor(buf296, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf296 # reuse
# Topologically Sorted Source Nodes: [add_496, log_124, logsumexp_124, sub_124, mul_124], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf308, buf309, 256, grid=grid(256), stream=stream0)
buf310 = buf308; del buf308 # reuse
# Topologically Sorted Source Nodes: [neg_125, add_501, add_502, truediv_125], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf307, buf306, buf302, buf309, buf305, buf310, 1024, grid=grid(1024), stream=stream0)
buf312 = buf302; del buf302 # reuse
# Topologically Sorted Source Nodes: [add_500, log_125, add_492, log_123, logsumexp_123, sub_123, mul_123, u_62, logsumexp_125, sub_125, mul_125, u_63], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf312, arg0_1, buf310, buf307, buf306, 256, grid=grid(256), stream=stream0)
buf314 = buf307; del buf307 # reuse
buf315 = buf314; del buf314 # reuse
# Topologically Sorted Source Nodes: [v_63, add_504, log_126, logsumexp_126, sub_126, mul_126, v_64], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf315, arg2_1, buf312, buf309, buf305, arg1_1, 256, grid=grid(256), stream=stream0)
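# Bookkeeping note: across consecutive blocks the traced source-node
# counters advance at a fixed stride (e.g. add_477 -> add_493 -> add_509,
# logsumexp_119 -> logsumexp_123 -> logsumexp_127), confirming that the
# scheduler emitted identical iterations and merely renumbered the graph
# nodes; no kernel, shape, or grid changes between blocks.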
buf316 = reinterpret_tensor(buf309, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf309 # reuse
buf317 = buf305; del buf305 # reuse
# Topologically Sorted Source Nodes: [neg_127, add_509, add_510, truediv_127, logsumexp_127], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf312, buf315, buf316, buf317, 256, grid=grid(256), stream=stream0)
buf318 = buf310; del buf310 # reuse
# Topologically Sorted Source Nodes: [neg_128, add_513, add_514, truediv_128], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf317, buf316, buf312, buf315, buf318, 1024, grid=grid(1024), stream=stream0)
buf319 = reinterpret_tensor(buf306, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf306 # reuse
# Topologically Sorted Source Nodes: [add_512, log_128, logsumexp_128, sub_128, mul_128], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf318, buf319, 256, grid=grid(256), stream=stream0)
buf320 = buf318; del buf318 # reuse
# Topologically Sorted Source Nodes: [neg_129, add_517, add_518, truediv_129], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf317, buf316, buf312, buf319, buf315, buf320, 1024, grid=grid(1024), stream=stream0)
buf322 = buf312; del buf312 # reuse
# Topologically Sorted Source Nodes: [add_516, log_129, add_508, log_127, logsumexp_127, sub_127, mul_127, u_64, logsumexp_129, sub_129, mul_129, u_65], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf322, arg0_1, buf320, buf317, buf316, 256, grid=grid(256), stream=stream0)
buf324 = buf317; del buf317 # reuse
buf325 = buf324; del buf324 # reuse
# Topologically Sorted Source Nodes: [v_65, add_520, log_130, logsumexp_130, sub_130, mul_130, v_66], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf325, arg2_1, buf322, buf319, buf315, arg1_1, 256, grid=grid(256), stream=stream0)
buf326 = reinterpret_tensor(buf319, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf319 # reuse
buf327 = buf315; del buf315 # reuse
# Topologically Sorted Source Nodes: [neg_131, add_525, add_526, truediv_131, logsumexp_131], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf322, buf325, buf326, buf327, 256, grid=grid(256), stream=stream0)
buf328 = buf320; del buf320 # reuse
# Topologically Sorted Source Nodes: [neg_132, add_529, add_530, truediv_132], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf327, buf326, buf322, buf325, buf328, 1024, grid=grid(1024), stream=stream0)
buf329 = reinterpret_tensor(buf316, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf316 # reuse
# Topologically Sorted Source Nodes: [add_528, log_132, logsumexp_132, sub_132, mul_132], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf328, buf329, 256, grid=grid(256), stream=stream0)
buf330 = buf328; del buf328 # reuse
# Topologically Sorted Source Nodes: [neg_133, add_533, add_534, truediv_133], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf327, buf326, buf322, buf329, buf325, buf330, 1024, grid=grid(1024), stream=stream0)
buf332 = buf322; del buf322 # reuse
# Topologically Sorted Source Nodes: [add_532, log_133, add_524, log_131, logsumexp_131, sub_131, mul_131, u_66, logsumexp_133, sub_133, mul_133, u_67], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf332, arg0_1, buf330, buf327, buf326, 256, grid=grid(256), stream=stream0)
buf334 = buf327; del buf327 # reuse
buf335 = buf334; del buf334 # reuse
# Topologically Sorted Source Nodes: [v_67, add_536, log_134, logsumexp_134, sub_134, mul_134, v_68], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf335, arg2_1, buf332, buf329, buf325, arg1_1, 256, grid=grid(256), stream=stream0)
buf336 = reinterpret_tensor(buf329, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf329 # reuse
buf337 = buf325; del buf325 # reuse
# Topologically Sorted Source Nodes: [neg_135, add_541, add_542, truediv_135, logsumexp_135], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf332, buf335, buf336, buf337, 256, grid=grid(256), stream=stream0)
buf338 = buf330; del buf330 # reuse
# Topologically Sorted Source Nodes: [neg_136, add_545, add_546, truediv_136], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf337, buf336, buf332, buf335, buf338, 1024, grid=grid(1024), stream=stream0)
buf339 = reinterpret_tensor(buf326, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf326 # reuse
# Topologically Sorted Source Nodes: [add_544, log_136, logsumexp_136, sub_136, mul_136], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf338, buf339, 256, grid=grid(256), stream=stream0)
buf340 = buf338; del buf338 # reuse
# Topologically Sorted Source Nodes: [neg_137, add_549, add_550, truediv_137], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf337, buf336, buf332, buf339, buf335, buf340, 1024, grid=grid(1024), stream=stream0)
buf342 = buf332; del buf332 # reuse
# Topologically Sorted Source Nodes: [add_548, log_137, add_540, log_135, logsumexp_135, sub_135, mul_135, u_68, logsumexp_137, sub_137, mul_137, u_69], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf342, arg0_1, buf340, buf337, buf336, 256, grid=grid(256), stream=stream0)
buf344 = buf337; del buf337 # reuse
buf345 = buf344; del buf344 # reuse
# Topologically Sorted Source Nodes: [v_69, add_552, log_138, logsumexp_138, sub_138, mul_138, v_70], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf345, arg2_1, buf342, buf339, buf335, arg1_1, 256, grid=grid(256), stream=stream0)
buf346 = reinterpret_tensor(buf339, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf339 # reuse
buf347 = buf335; del buf335 # reuse
# Topologically Sorted Source Nodes: [neg_139, add_557, add_558, truediv_139, logsumexp_139], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf342, buf345, buf346, buf347, 256, grid=grid(256), stream=stream0)
buf348 = buf340; del buf340 # reuse
# Topologically Sorted Source Nodes: [neg_140, add_561, add_562, truediv_140], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf347, buf346, buf342, buf345, buf348, 1024, grid=grid(1024), stream=stream0)
buf349 = reinterpret_tensor(buf336, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf336 # reuse
# Topologically Sorted Source Nodes: [add_560, log_140, logsumexp_140, sub_140, mul_140], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf348, buf349, 256, grid=grid(256), stream=stream0)
buf350 = buf348; del buf348 # reuse
# Topologically Sorted Source Nodes: [neg_141, add_565, add_566, truediv_141], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf347, buf346, buf342, buf349, buf345, buf350, 1024, grid=grid(1024), stream=stream0)
buf352 = buf342; del buf342 # reuse
# Topologically Sorted Source Nodes: [add_564, log_141, add_556, log_139, logsumexp_139, sub_139, mul_139, u_70, logsumexp_141, sub_141, mul_141, u_71], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf352, arg0_1, buf350, buf347, buf346, 256, grid=grid(256), stream=stream0)
buf354 = buf347; del buf347 # reuse
buf355 = buf354; del buf354 # reuse
# Topologically Sorted Source Nodes: [v_71, add_568, log_142, logsumexp_142, sub_142, mul_142, v_72], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf355, arg2_1, buf352, buf349, buf345, arg1_1, 256, grid=grid(256), stream=stream0)
buf356 = reinterpret_tensor(buf349, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf349 # reuse
buf357 = buf345; del buf345 # reuse
# Topologically Sorted Source Nodes: [neg_143, add_573, add_574, truediv_143, logsumexp_143], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf352, buf355, buf356, buf357, 256, grid=grid(256), stream=stream0)
buf358 = buf350; del buf350 # reuse
# Topologically Sorted Source Nodes: [neg_144, add_577, add_578, truediv_144], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf357, buf356, buf352, buf355, buf358, 1024, grid=grid(1024), stream=stream0)
buf359 = reinterpret_tensor(buf346, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf346 # reuse
# Topologically Sorted Source Nodes: [add_576, log_144, logsumexp_144, sub_144, mul_144], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf358, buf359, 256, grid=grid(256), stream=stream0)
buf360 = buf358; del buf358 # reuse
# Topologically Sorted Source Nodes: [neg_145, add_581, add_582, truediv_145], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf357, buf356, buf352, buf359, buf355, buf360, 1024, grid=grid(1024), stream=stream0)
buf362 = buf352; del buf352 # reuse
# Topologically Sorted Source Nodes: [add_580, log_145, add_572, log_143, logsumexp_143, sub_143, mul_143, u_72, logsumexp_145, sub_145, mul_145, u_73], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf362, arg0_1, buf360, buf357, buf356, 256, grid=grid(256), stream=stream0)
buf364 = buf357; del buf357 # reuse
buf365 = buf364; del buf364 # reuse
# Topologically Sorted Source Nodes: [v_73, add_584, log_146, logsumexp_146, sub_146, mul_146, v_74], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf365, arg2_1, buf362, buf359, buf355, arg1_1, 256, grid=grid(256), stream=stream0)
buf366 = reinterpret_tensor(buf359, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf359 # reuse
buf367 = buf355; del buf355 # reuse
# Topologically Sorted Source Nodes: [neg_147, add_589, add_590, truediv_147, logsumexp_147], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf362, buf365, buf366, buf367, 256, grid=grid(256), stream=stream0)
buf368 = buf360; del buf360 # reuse
# Topologically Sorted Source Nodes: [neg_148, add_593, add_594, truediv_148], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf367, buf366, buf362, buf365, buf368, 1024, grid=grid(1024), stream=stream0)
buf369 = reinterpret_tensor(buf356, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf356 # reuse
# Topologically Sorted Source Nodes: [add_592, log_148, logsumexp_148, sub_148, mul_148], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf368, buf369, 256, grid=grid(256), stream=stream0)
buf370 = buf368; del buf368 # reuse
# Topologically Sorted Source Nodes: [neg_149, add_597, add_598, truediv_149], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf367, buf366, buf362, buf369, buf365, buf370, 1024, grid=grid(1024), stream=stream0)
buf372 = buf362; del buf362 # reuse
# Topologically Sorted Source Nodes: [add_596, log_149, add_588, log_147, logsumexp_147, sub_147, mul_147, u_74, logsumexp_149, sub_149, mul_149, u_75], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf372, arg0_1, buf370, buf367, buf366, 256, grid=grid(256), stream=stream0)
buf374 = buf367; del buf367 # reuse
buf375 = buf374; del buf374 # reuse
# Topologically Sorted Source Nodes: [v_75, add_600, log_150, logsumexp_150, sub_150, mul_150, v_76], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf375, arg2_1, buf372, buf369, buf365, arg1_1, 256, grid=grid(256), stream=stream0)
buf376 = reinterpret_tensor(buf369, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf369 # reuse
buf377 = buf365; del buf365 # reuse
# Topologically Sorted Source Nodes: [neg_151, add_605, add_606, truediv_151, logsumexp_151], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf372, buf375, buf376, buf377, 256, grid=grid(256), stream=stream0)
buf378 = buf370; del buf370 # reuse
# Topologically Sorted Source Nodes: [neg_152, add_609, add_610, truediv_152], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf377, buf376, buf372, buf375, buf378, 1024, grid=grid(1024), stream=stream0)
buf379 = reinterpret_tensor(buf366, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf366 # reuse
# Topologically Sorted Source Nodes: [add_608, log_152, logsumexp_152, sub_152, mul_152], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf378, buf379, 256, grid=grid(256), stream=stream0)
buf380 = buf378; del buf378 # reuse
# Topologically Sorted Source Nodes: [neg_153, add_613, add_614, truediv_153], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf377, buf376, buf372, buf379, buf375, buf380, 1024, grid=grid(1024), stream=stream0)
buf382 = buf372; del buf372 # reuse
# Topologically Sorted Source Nodes: [add_612, log_153, add_604, log_151, logsumexp_151, sub_151, mul_151, u_76, logsumexp_153, sub_153, mul_153, u_77], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf382, arg0_1, buf380, buf377, buf376, 256, grid=grid(256), stream=stream0)
buf384 = buf377; del buf377 # reuse
buf385 = buf384; del buf384 # reuse
# Topologically Sorted Source Nodes: [v_77, add_616, log_154, logsumexp_154, sub_154, mul_154, v_78], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf385, arg2_1, buf382, buf379, buf375, arg1_1, 256, grid=grid(256), stream=stream0)
buf386 = reinterpret_tensor(buf379, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf379 # reuse
buf387 = buf375; del buf375 # reuse
# Topologically Sorted Source Nodes: [neg_155, add_621, add_622, truediv_155, logsumexp_155], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf382, buf385, buf386, buf387, 256, grid=grid(256), stream=stream0)
buf388 = buf380; del buf380 # reuse
# Topologically Sorted Source Nodes: [neg_156, add_625, add_626, truediv_156], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf387, buf386, buf382, buf385, buf388, 1024, grid=grid(1024), stream=stream0)
buf389 = reinterpret_tensor(buf376, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf376 # reuse
# Topologically Sorted Source Nodes: [add_624, log_156, logsumexp_156, sub_156, mul_156], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf388, buf389, 256, grid=grid(256), stream=stream0)
buf390 = buf388; del buf388 # reuse
# Topologically Sorted Source Nodes: [neg_157, add_629, add_630, truediv_157], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf387, buf386, buf382, buf389, buf385, buf390, 1024, grid=grid(1024), stream=stream0)
buf392 = buf382; del buf382 # reuse
# Topologically Sorted Source Nodes: [add_628, log_157, add_620, log_155, logsumexp_155, sub_155, mul_155, u_78, logsumexp_157, sub_157, mul_157, u_79], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf392, arg0_1, buf390, buf387, buf386, 256, grid=grid(256), stream=stream0)
buf394 = buf387; del buf387 # reuse
buf395 = buf394; del buf394 # reuse
# Topologically Sorted Source Nodes: [v_79, add_632, log_158, logsumexp_158, sub_158, mul_158, v_80], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf395, arg2_1, buf392, buf389, buf385, arg1_1, 256, grid=grid(256), stream=stream0)
buf396 = reinterpret_tensor(buf389, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf389 # reuse
buf397 = buf385; del buf385 # reuse
# Topologically Sorted Source Nodes: [neg_159, add_637, add_638, truediv_159, logsumexp_159], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf392, buf395, buf396, buf397, 256, grid=grid(256), stream=stream0)
buf398 = buf390; del buf390 # reuse
# Topologically Sorted Source Nodes: [neg_160, add_641, add_642, truediv_160], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf397, buf396, buf392, buf395, buf398, 1024, grid=grid(1024), stream=stream0)
buf399 = reinterpret_tensor(buf386, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf386 # reuse
# Topologically Sorted Source Nodes: [add_640, log_160, logsumexp_160, sub_160, mul_160], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf398, buf399, 256, grid=grid(256), stream=stream0)
buf400 = buf398; del buf398 # reuse
# Topologically Sorted Source Nodes: [neg_161, add_645, add_646, truediv_161], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf397, buf396, buf392, buf399, buf395, buf400, 1024, grid=grid(1024), stream=stream0)
buf402 = buf392; del buf392 # reuse
# Topologically Sorted Source Nodes: [add_644, log_161, add_636, log_159, logsumexp_159, sub_159, mul_159, u_80, logsumexp_161, sub_161, mul_161, u_81], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf402, arg0_1, buf400, buf397, buf396, 256, grid=grid(256), stream=stream0)
buf404 = buf397; del buf397 # reuse
buf405 = buf404; del buf404 # reuse
# Topologically Sorted Source Nodes: [v_81, add_648, log_162, logsumexp_162, sub_162, mul_162, v_82], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf405, arg2_1, buf402, buf399, buf395, arg1_1, 256, grid=grid(256), stream=stream0)
buf406 = reinterpret_tensor(buf399, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf399 # reuse
buf407 = buf395; del buf395 # reuse
# Topologically Sorted Source Nodes: [neg_163, add_653, add_654, truediv_163, logsumexp_163], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf402, buf405, buf406, buf407, 256, grid=grid(256), stream=stream0)
buf408 = buf400; del buf400 # reuse
# Topologically Sorted Source Nodes: [neg_164, add_657, add_658, truediv_164], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf407, buf406, buf402, buf405, buf408, 1024, grid=grid(1024), stream=stream0)
buf409 = reinterpret_tensor(buf396, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf396 # reuse
# Topologically Sorted Source Nodes: [add_656, log_164, logsumexp_164, sub_164, mul_164], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf408, buf409, 256, grid=grid(256), stream=stream0)
buf410 = buf408; del buf408 # reuse
# Topologically Sorted Source Nodes: [neg_165, add_661, add_662, truediv_165], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf407, buf406, buf402, buf409, buf405, buf410, 1024, grid=grid(1024), stream=stream0)
buf412 = buf402; del buf402 # reuse
# Topologically Sorted Source Nodes: [add_660, log_165, add_652, log_163, logsumexp_163, sub_163, mul_163, u_82, logsumexp_165, sub_165, mul_165, u_83], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf412, arg0_1, buf410, buf407, buf406, 256, grid=grid(256), stream=stream0)
buf414 = buf407; del buf407 # reuse
buf415 = buf414; del buf414 # reuse
# Topologically Sorted Source Nodes: [v_83, add_664, log_166, logsumexp_166, sub_166, mul_166, v_84], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf415, arg2_1, buf412, buf409, buf405, arg1_1, 256, grid=grid(256), stream=stream0)
buf416 = reinterpret_tensor(buf409, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf409 # reuse
buf417 = buf405; del buf405 # reuse
# Topologically Sorted Source Nodes: [neg_167, add_669, add_670, truediv_167, logsumexp_167], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf412, buf415, buf416, buf417, 256, grid=grid(256), stream=stream0)
buf418 = buf410; del buf410 # reuse
# Topologically Sorted Source Nodes: [neg_168, add_673, add_674, truediv_168], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf417, buf416, buf412, buf415, buf418, 1024, grid=grid(1024), stream=stream0)
buf419 = reinterpret_tensor(buf406, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf406 # reuse
# Topologically Sorted Source Nodes: [add_672, log_168, logsumexp_168, sub_168, mul_168], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf418, buf419, 256, grid=grid(256), stream=stream0)
buf420 = buf418; del buf418 # reuse
# Topologically Sorted Source Nodes: [neg_169, add_677, add_678, truediv_169], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf417, buf416, buf412, buf419, buf415, buf420, 1024, grid=grid(1024), stream=stream0)
buf422 = buf412; del buf412 # reuse
# Topologically Sorted Source Nodes: [add_676, log_169, add_668, log_167, logsumexp_167, sub_167, mul_167, u_84, logsumexp_169, sub_169, mul_169, u_85], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf422, arg0_1, buf420, buf417, buf416, 256, grid=grid(256), stream=stream0)
buf424 = buf417; del buf417 # reuse
buf425 = buf424; del buf424 # reuse
# Topologically Sorted Source Nodes: [v_85, add_680, log_170, logsumexp_170, sub_170, mul_170, v_86], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf425, arg2_1, buf422, buf419, buf415, arg1_1, 256, grid=grid(256), stream=stream0)
buf426 = reinterpret_tensor(buf419, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf419 # reuse
buf427 = buf415; del buf415 # reuse
# Topologically Sorted Source Nodes: [neg_171, add_685, add_686, truediv_171, logsumexp_171], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf422, buf425, buf426, buf427, 256, grid=grid(256), stream=stream0)
buf428 = buf420; del buf420 # reuse
# Topologically Sorted Source Nodes: [neg_172, add_689, add_690, truediv_172], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf427, buf426, buf422, buf425, buf428, 1024, grid=grid(1024), stream=stream0)
buf429 = reinterpret_tensor(buf416, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf416 # reuse
# Topologically Sorted Source Nodes: [add_688, log_172, logsumexp_172, sub_172, mul_172], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf428, buf429, 256, grid=grid(256), stream=stream0)
buf430 = buf428; del buf428 # reuse
# Topologically Sorted Source Nodes: [neg_173, add_693, add_694, truediv_173], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf427, buf426, buf422, buf429, buf425, buf430, 1024, grid=grid(1024), stream=stream0)
buf432 = buf422; del buf422 # reuse
# Topologically Sorted Source Nodes: [add_692, log_173, add_684, log_171, logsumexp_171, sub_171, mul_171, u_86, logsumexp_173, sub_173, mul_173, u_87], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf432, arg0_1, buf430, buf427, buf426, 256, grid=grid(256), stream=stream0)
buf434 = buf427; del buf427 # reuse
buf435 = buf434; del buf434 # reuse
# Topologically Sorted Source Nodes: [v_87, add_696, log_174, logsumexp_174, sub_174, mul_174, v_88], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf435, arg2_1, buf432, buf429, buf425, arg1_1, 256, grid=grid(256), stream=stream0)
buf436 = reinterpret_tensor(buf429, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf429 # reuse
buf437 = buf425; del buf425 # reuse
# Topologically Sorted Source Nodes: [neg_175, add_701, add_702, truediv_175, logsumexp_175], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf432, buf435, buf436, buf437, 256, grid=grid(256), stream=stream0)
buf438 = buf430; del buf430 # reuse
# Topologically Sorted Source Nodes: [neg_176, add_705, add_706, truediv_176], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf437, buf436, buf432, buf435, buf438, 1024, grid=grid(1024), stream=stream0)
buf439 = reinterpret_tensor(buf426, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf426 # reuse
# Topologically Sorted Source Nodes: [add_704, log_176, logsumexp_176, sub_176, mul_176], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf438, buf439, 256, grid=grid(256), stream=stream0)
buf440 = buf438; del buf438 # reuse
# Topologically Sorted Source Nodes: [neg_177, add_709, add_710, truediv_177], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf437, buf436, buf432, buf439, buf435, buf440, 1024, grid=grid(1024), stream=stream0)
buf442 = buf432; del buf432 # reuse
# Topologically Sorted Source Nodes: [add_708, log_177, add_700, log_175, logsumexp_175, sub_175, mul_175, u_88, logsumexp_177, sub_177, mul_177, u_89], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf442, arg0_1, buf440, buf437, buf436, 256, grid=grid(256), stream=stream0)
buf444 = buf437; del buf437 # reuse
buf445 = buf444; del buf444 # reuse
# Topologically Sorted Source Nodes: [v_89, add_712, log_178, logsumexp_178, sub_178, mul_178, v_90], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf445, arg2_1, buf442, buf439, buf435, arg1_1, 256, grid=grid(256), stream=stream0)
buf446 = reinterpret_tensor(buf439, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf439 # reuse
buf447 = buf435; del buf435 # reuse
# Topologically Sorted Source Nodes: [neg_179, add_717, add_718, truediv_179, logsumexp_179], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf442, buf445, buf446, buf447, 256, grid=grid(256), stream=stream0)
buf448 = buf440; del buf440 # reuse
# Topologically Sorted Source Nodes: [neg_180, add_721, add_722, truediv_180], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf447, buf446, buf442, buf445, buf448, 1024, grid=grid(1024), stream=stream0)
buf449 = reinterpret_tensor(buf436, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf436 # reuse
# Topologically Sorted Source Nodes: [add_720, log_180, logsumexp_180, sub_180, mul_180], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf448, buf449, 256, grid=grid(256), stream=stream0)
buf450 = buf448; del buf448 # reuse
# Topologically Sorted Source Nodes: [neg_181, add_725, add_726, truediv_181], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf447, buf446, buf442, buf449, buf445, buf450, 1024, grid=grid(1024), stream=stream0)
buf452 = buf442; del buf442 # reuse
# Topologically Sorted Source Nodes: [add_724, log_181, add_716, log_179, logsumexp_179, sub_179, mul_179, u_90, logsumexp_181, sub_181, mul_181, u_91], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf452, arg0_1, buf450, buf447, buf446, 256, grid=grid(256), stream=stream0)
buf454 = buf447; del buf447 # reuse
buf455 = buf454; del buf454 # reuse
# Topologically Sorted Source Nodes: [v_91, add_728, log_182, logsumexp_182, sub_182, mul_182, v_92], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf455, arg2_1, buf452, buf449, buf445, arg1_1, 256, grid=grid(256), stream=stream0)
buf456 = reinterpret_tensor(buf449, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf449 # reuse
buf457 = buf445; del buf445 # reuse
# Topologically Sorted Source Nodes: [neg_183, add_733, add_734, truediv_183, logsumexp_183], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf452, buf455, buf456, buf457, 256, grid=grid(256), stream=stream0)
buf458 = buf450; del buf450 # reuse
# Topologically Sorted Source Nodes: [neg_184, add_737, add_738, truediv_184], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf457, buf456, buf452, buf455, buf458, 1024, grid=grid(1024), stream=stream0)
buf459 = reinterpret_tensor(buf446, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf446 # reuse
# Topologically Sorted Source Nodes: [add_736, log_184, logsumexp_184, sub_184, mul_184], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf458, buf459, 256, grid=grid(256), stream=stream0)
buf460 = buf458; del buf458 # reuse
# Topologically Sorted Source Nodes: [neg_185, add_741, add_742, truediv_185], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf457, buf456, buf452, buf459, buf455, buf460, 1024, grid=grid(1024), stream=stream0)
buf462 = buf452; del buf452 # reuse
# Topologically Sorted Source Nodes: [add_740, log_185, add_732, log_183, logsumexp_183, sub_183, mul_183, u_92, logsumexp_185, sub_185, mul_185, u_93], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf462, arg0_1, buf460, buf457, buf456, 256, grid=grid(256), stream=stream0)
buf464 = buf457; del buf457 # reuse
buf465 = buf464; del buf464 # reuse
# Topologically Sorted Source Nodes: [v_93, add_744, log_186, logsumexp_186, sub_186, mul_186, v_94], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf465, arg2_1, buf462, buf459, buf455, arg1_1, 256, grid=grid(256), stream=stream0)
buf466 = reinterpret_tensor(buf459, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf459 # reuse
buf467 = buf455; del buf455 # reuse
# Topologically Sorted Source Nodes: [neg_187, add_749, add_750, truediv_187, logsumexp_187], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf462, buf465, buf466, buf467, 256, grid=grid(256), stream=stream0)
buf468 = buf460; del buf460 # reuse
# Topologically Sorted Source Nodes: [neg_188, add_753, add_754, truediv_188], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf467, buf466, buf462, buf465, buf468, 1024, grid=grid(1024), stream=stream0)
buf469 = reinterpret_tensor(buf456, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf456 # reuse
# Topologically Sorted Source Nodes: [add_752, log_188, logsumexp_188, sub_188, mul_188], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf468, buf469, 256, grid=grid(256), stream=stream0)
buf470 = buf468; del buf468 # reuse
# Topologically Sorted Source Nodes: [neg_189, add_757, add_758, truediv_189], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf467, buf466, buf462, buf469, buf465, buf470, 1024, grid=grid(1024), stream=stream0)
buf472 = buf462; del buf462 # reuse
# Topologically Sorted Source Nodes: [add_756, log_189, add_748, log_187, logsumexp_187, sub_187, mul_187, u_94, logsumexp_189, sub_189, mul_189, u_95], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf472, arg0_1, buf470, buf467, buf466, 256, grid=grid(256), stream=stream0)
buf474 = buf467; del buf467 # reuse
buf475 = buf474; del buf474 # reuse
# Topologically Sorted Source Nodes: [v_95, add_760, log_190, logsumexp_190, sub_190, mul_190, v_96], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf475, arg2_1, buf472, buf469, buf465, arg1_1, 256, grid=grid(256), stream=stream0)
buf476 = reinterpret_tensor(buf469, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf469 # reuse
buf477 = buf465; del buf465 # reuse
# Topologically Sorted Source Nodes: [neg_191, add_765, add_766, truediv_191, logsumexp_191], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf472, buf475, buf476, buf477, 256, grid=grid(256), stream=stream0)
buf478 = buf470; del buf470 # reuse
# Topologically Sorted Source Nodes: [neg_192, add_769, add_770, truediv_192], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf477, buf476, buf472, buf475, buf478, 1024, grid=grid(1024), stream=stream0)
buf479 = reinterpret_tensor(buf466, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf466 # reuse
# Topologically Sorted Source Nodes: [add_768, log_192, logsumexp_192, sub_192, mul_192], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf478, buf479, 256, grid=grid(256), stream=stream0)
buf480 = buf478; del buf478 # reuse
# Topologically Sorted Source Nodes: [neg_193, add_773, add_774, truediv_193], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf477, buf476, buf472, buf479, buf475, buf480, 1024, grid=grid(1024), stream=stream0)
buf482 = buf472; del buf472 # reuse
# Topologically Sorted Source Nodes: [add_772, log_193, add_764, log_191, logsumexp_191, sub_191, mul_191, u_96, logsumexp_193, sub_193, mul_193, u_97], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf482, arg0_1, buf480, buf477, buf476, 256, grid=grid(256), stream=stream0)
buf484 = buf477; del buf477 # reuse
buf485 = buf484; del buf484 # reuse
# Topologically Sorted Source Nodes: [v_97, add_776, log_194, logsumexp_194, sub_194, mul_194, v_98], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf485, arg2_1, buf482, buf479, buf475, arg1_1, 256, grid=grid(256), stream=stream0)
buf486 = reinterpret_tensor(buf479, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf479 # reuse
buf487 = buf475; del buf475 # reuse
# Topologically Sorted Source Nodes: [neg_195, add_781, add_782, truediv_195, logsumexp_195], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf482, buf485, buf486, buf487, 256, grid=grid(256), stream=stream0)
buf488 = buf480; del buf480 # reuse
# Topologically Sorted Source Nodes: [neg_196, add_785, add_786, truediv_196], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_14.run(arg2_1, arg0_1, buf487, buf486, buf482, buf485, buf488, 1024, grid=grid(1024), stream=stream0)
buf489 = reinterpret_tensor(buf476, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf476 # reuse
# Topologically Sorted Source Nodes: [add_784, log_196, logsumexp_196, sub_196, mul_196], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_4.run(arg1_1, buf488, buf489, 256, grid=grid(256), stream=stream0)
buf490 = buf488; del buf488 # reuse
# Topologically Sorted Source Nodes: [neg_197, add_789, add_790, truediv_197], Original ATen: [aten.neg, aten.add, aten.div]
triton_poi_fused_add_div_neg_15.run(arg2_1, arg0_1, buf487, buf486, buf482, buf489, buf485, buf490, 1024, grid=grid(1024), stream=stream0)
buf492 = buf482; del buf482 # reuse
# Topologically Sorted Source Nodes: [add_788, log_197, add_780, log_195, logsumexp_195, sub_195, mul_195, u_98, logsumexp_197, sub_197, mul_197, u_99], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_16.run(buf492, arg0_1, buf490, buf487, buf486, 256, grid=grid(256), stream=stream0)
del buf486
buf494 = buf487; del buf487 # reuse
buf495 = buf494; del buf494 # reuse
# Topologically Sorted Source Nodes: [v_99, add_792, log_198, logsumexp_198, sub_198, mul_198, v_100], Original ATen: [aten.add, aten.log, aten.logsumexp, aten.sub, aten.mul]
triton_poi_fused_add_log_logsumexp_mul_sub_17.run(buf495, arg2_1, buf492, buf489, buf485, arg1_1, 256, grid=grid(256), stream=stream0)
del arg1_1
buf496 = reinterpret_tensor(buf489, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf489 # reuse
buf497 = buf485; del buf485 # reuse
# Topologically Sorted Source Nodes: [neg_199, add_797, add_798, truediv_199, logsumexp_199], Original ATen: [aten.neg, aten.add, aten.div, aten.logsumexp]
triton_poi_fused_add_div_logsumexp_neg_13.run(arg2_1, buf492, buf495, buf496, buf497, 256, grid=grid(256), stream=stream0)
buf498 = buf490; del buf490 # reuse
buf499 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [neg_200, add_800, add_801, truediv_200, exp, mul_200, cost], Original ATen: [aten.neg, aten.add, aten.div, aten.exp, aten.mul, aten.sum]
triton_per_fused_add_div_exp_mul_neg_sum_19.run(arg2_1, arg0_1, buf497, buf496, buf492, buf495, buf498, buf499, 64, 16, grid=grid(64), stream=stream0)
del arg0_1
del arg2_1
del buf492
del buf495
del buf496
del buf497
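    # buf499: per-batch Sinkhorn cost (pi * C summed over the last two dims);
    # buf498: the transport plan pi. The order matches the reference module's
    # `return cost, pi` in SinkhornDistance.forward below.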
return (buf499, buf498, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
class SinkhornDistance(torch.nn.Module):
"""
Given two empirical measures each with :math:`P_1` locations
:math:`x\\in\\mathbb{R}^{D_1}` and :math:`P_2` locations :math:`y\\in\\mathbb{R}^{D_2}`,
outputs an approximation of the regularized OT cost for point clouds.
Args:
eps (float): regularization coefficient
max_iter (int): maximum number of Sinkhorn iterations
reduction (string, optional): Specifies the reduction to apply to the output:
'none' | 'mean' | 'sum'. 'none': no reduction will be applied,
'mean': the sum of the output will be divided by the number of
elements in the output, 'sum': the output will be summed. Default: 'none'
Shape:
- Input: :math:`(N, P_1, D_1)`, :math:`(N, P_2, D_2)`
- Output: :math:`(N)` or :math:`()`, depending on `reduction`
"""
def __init__(self, eps=0.001, max_iter=100, reduction='none'):
super(SinkhornDistance, self).__init__()
self.eps = eps
self.max_iter = max_iter
self.reduction = reduction
def forward(self, mu, nu, C):
u = torch.ones_like(mu)
v = torch.ones_like(nu)
for i in range(self.max_iter):
v = self.eps * (torch.log(nu + 1e-08) - torch.logsumexp(self.M(
C, u, v).transpose(-2, -1), dim=-1)) + v
u = self.eps * (torch.log(mu + 1e-08) - torch.logsumexp(self.M(
C, u, v), dim=-1)) + u
U, V = u, v
pi = torch.exp(self.M(C, U, V)).detach()
cost = torch.sum(pi * C, dim=(-2, -1))
return cost, pi
def M(self, C, u, v):
"""
"Modified cost for logarithmic updates"
"$M_{ij} = (-c_{ij} + u_i + v_j) / epsilon$"
"""
return (-C + u.unsqueeze(-1) + v.unsqueeze(-2)) / self.eps
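# Minimal usage sketch (illustrative, not part of the generated module; it
# assumes the (4, 4, 4, 4) tensors produced by get_inputs() below and the
# constructor defaults):
#
#     sinkhorn = SinkhornDistance(eps=0.001, max_iter=100)
#     mu, nu, C = get_inputs()
#     cost, pi = sinkhorn(mu, nu, C)  # cost: shape (4, 4, 4); pi = exp(M(C, u, v))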
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_logsumexp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x0 = xindex % 4
x2 = xindex // 16 % 16
x5 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = -tmp0
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tmp3 + tmp2
tmp5 = 1000.0
tmp6 = tmp4 * tmp5
tmp8 = -tmp7
tmp9 = tmp8 + tmp2
tmp10 = tmp9 + tmp2
tmp11 = tmp10 * tmp5
tmp13 = -tmp12
tmp14 = tmp13 + tmp2
tmp15 = tmp14 + tmp2
tmp16 = tmp15 * tmp5
tmp17 = triton_helpers.maximum(tmp11, tmp16)
tmp19 = -tmp18
tmp20 = tmp19 + tmp2
tmp21 = tmp20 + tmp2
tmp22 = tmp21 * tmp5
tmp23 = triton_helpers.maximum(tmp17, tmp22)
tmp25 = -tmp24
tmp26 = tmp25 + tmp2
tmp27 = tmp26 + tmp2
tmp28 = tmp27 * tmp5
tmp29 = triton_helpers.maximum(tmp23, tmp28)
tmp30 = tl_math.abs(tmp29)
tmp31 = float('inf')
tmp32 = tmp30 == tmp31
tmp33 = 0.0
tmp34 = tl.where(tmp32, tmp33, tmp29)
tmp35 = tmp6 - tmp34
tl.store(out_ptr0 + x5, tmp35, xmask)
@triton.jit
def triton_poi_fused_logsumexp_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = xindex // 4
x1 = xindex // 4 % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x3), xmask)
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x3), xmask)
tmp5 = tl.load(in_ptr0 + (8 + x0 + 16 * x3), xmask)
tmp8 = tl.load(in_ptr0 + (12 + x0 + 16 * x3), xmask)
tmp12 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp31 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp13 = -tmp12
tmp14 = 1.0
tmp15 = tmp13 + tmp14
tmp16 = tmp15 + tmp14
tmp17 = 1000.0
tmp18 = tmp16 * tmp17
tmp20 = -tmp19
tmp21 = tmp20 + tmp14
tmp22 = tmp21 + tmp14
tmp23 = tmp22 * tmp17
tmp24 = triton_helpers.maximum(tmp18, tmp23)
tmp26 = -tmp25
tmp27 = tmp26 + tmp14
tmp28 = tmp27 + tmp14
tmp29 = tmp28 * tmp17
tmp30 = triton_helpers.maximum(tmp24, tmp29)
tmp32 = -tmp31
tmp33 = tmp32 + tmp14
tmp34 = tmp33 + tmp14
tmp35 = tmp34 * tmp17
tmp36 = triton_helpers.maximum(tmp30, tmp35)
tmp37 = tl_math.abs(tmp36)
tmp38 = float('inf')
tmp39 = tmp37 == tmp38
tmp40 = 0.0
tmp41 = tl.where(tmp39, tmp40, tmp36)
tmp42 = tmp11 + tmp41
tl.store(out_ptr0 + x4, tmp42, xmask)
@triton.jit
def triton_poi_fused_add_div_logsumexp_neg_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x4 = xindex // 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + 4 * x4, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 4 * x4, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (1 + 4 * x4), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr2 + (1 + 4 * x4), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr1 + (2 + 4 * x4), xmask, eviction_policy='evict_last'
)
tmp35 = tl.load(in_ptr2 + (2 + 4 * x4), xmask, eviction_policy='evict_last'
)
tmp42 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp45 = tl.load(in_ptr1 + (3 + 4 * x4), xmask, eviction_policy='evict_last'
)
tmp48 = tl.load(in_ptr2 + (3 + 4 * x4), xmask, eviction_policy='evict_last'
)
tmp1 = -tmp0
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp5 = 1e-08
tmp6 = tmp4 + tmp5
tmp7 = tl_math.log(tmp6)
tmp9 = tmp7 - tmp8
tmp10 = 0.001
tmp11 = tmp9 * tmp10
tmp12 = tmp11 + tmp2
tmp13 = tmp3 + tmp12
tmp14 = 1000.0
tmp15 = tmp13 * tmp14
tmp17 = -tmp16
tmp18 = tmp17 + tmp2
tmp20 = tmp19 + tmp5
tmp21 = tl_math.log(tmp20)
tmp23 = tmp21 - tmp22
tmp24 = tmp23 * tmp10
tmp25 = tmp24 + tmp2
tmp26 = tmp18 + tmp25
tmp27 = tmp26 * tmp14
tmp28 = triton_helpers.maximum(tmp15, tmp27)
tmp30 = -tmp29
tmp31 = tmp30 + tmp2
tmp33 = tmp32 + tmp5
tmp34 = tl_math.log(tmp33)
tmp36 = tmp34 - tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp37 + tmp2
tmp39 = tmp31 + tmp38
tmp40 = tmp39 * tmp14
tmp41 = triton_helpers.maximum(tmp28, tmp40)
tmp43 = -tmp42
tmp44 = tmp43 + tmp2
tmp46 = tmp45 + tmp5
tmp47 = tl_math.log(tmp46)
tmp49 = tmp47 - tmp48
tmp50 = tmp49 * tmp10
tmp51 = tmp50 + tmp2
tmp52 = tmp44 + tmp51
tmp53 = tmp52 * tmp14
tmp54 = triton_helpers.maximum(tmp41, tmp53)
tmp55 = tl_math.abs(tmp54)
tmp56 = float('inf')
tmp57 = tmp55 == tmp56
tmp58 = 0.0
tmp59 = tl.where(tmp57, tmp58, tmp54)
tmp60 = tmp15 - tmp59
tmp61 = tl_math.exp(tmp60)
tmp62 = tmp27 - tmp59
tmp63 = tl_math.exp(tmp62)
tmp64 = tmp61 + tmp63
tmp65 = tmp40 - tmp59
tmp66 = tl_math.exp(tmp65)
tmp67 = tmp64 + tmp66
tmp68 = tmp53 - tmp59
tmp69 = tl_math.exp(tmp68)
tmp70 = tmp67 + tmp69
tl.store(out_ptr0 + x5, tmp54, xmask)
tl.store(out_ptr1 + x5, tmp70, xmask)
@triton.jit
def triton_poi_fused_add_div_neg_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x5 = xindex // 4
x0 = xindex % 4
x6 = xindex // 16
x7 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr4 + (x0 + 4 * x6), xmask, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr5 + (x0 + 4 * x6), xmask, eviction_policy=
'evict_last')
tmp1 = -tmp0
tmp3 = 1e-08
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp7 = tl_math.log(tmp6)
tmp9 = tl_math.abs(tmp8)
tmp10 = float('inf')
tmp11 = tmp9 == tmp10
tmp12 = 0.0
tmp13 = tl.where(tmp11, tmp12, tmp8)
tmp14 = tmp7 + tmp13
tmp15 = tmp5 - tmp14
tmp16 = 0.001
tmp17 = tmp15 * tmp16
tmp18 = 1.0
tmp19 = tmp17 + tmp18
tmp20 = tmp1 + tmp19
tmp22 = tmp21 + tmp3
tmp23 = tl_math.log(tmp22)
tmp25 = tmp23 - tmp24
tmp26 = tmp25 * tmp16
tmp27 = tmp26 + tmp18
tmp28 = tmp20 + tmp27
tmp29 = 1000.0
tmp30 = tmp28 * tmp29
tl.store(out_ptr0 + x7, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_log_logsumexp_mul_sub_4(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask)
tmp9 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask)
tmp1 = 1e-08
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp11 = tl_math.abs(tmp10)
tmp12 = float('inf')
tmp13 = tmp11 == tmp12
tmp14 = 0.0
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = tmp4 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp5 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp9 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tl_math.log(tmp26)
tmp28 = tmp27 + tmp15
tmp29 = tmp3 - tmp28
tmp30 = 0.001
tmp31 = tmp29 * tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_neg_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x5 = xindex // 4
x0 = xindex % 4
x6 = xindex // 16
x7 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr4 + (x0 + 4 * x6), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr5 + (x0 + 4 * x6), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr6 + (x0 + 4 * x6), xmask, eviction_policy=
'evict_last')
tmp1 = -tmp0
tmp3 = 1e-08
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp7 = tl_math.log(tmp6)
tmp9 = tl_math.abs(tmp8)
tmp10 = float('inf')
tmp11 = tmp9 == tmp10
tmp12 = 0.0
tmp13 = tl.where(tmp11, tmp12, tmp8)
tmp14 = tmp7 + tmp13
tmp15 = tmp5 - tmp14
tmp16 = 0.001
tmp17 = tmp15 * tmp16
tmp18 = 1.0
tmp19 = tmp17 + tmp18
tmp20 = tmp1 + tmp19
tmp23 = tmp22 + tmp3
tmp24 = tl_math.log(tmp23)
tmp26 = tmp24 - tmp25
tmp27 = tmp26 * tmp16
tmp28 = tmp27 + tmp18
tmp29 = tmp21 + tmp28
tmp30 = tmp20 + tmp29
tl.store(out_ptr0 + x7, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_div_log_logsumexp_sub_6(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 1e-08
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp5 = 1000.0
tmp6 = tmp4 * tmp5
tmp8 = tmp7 * tmp5
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp10 * tmp5
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp5
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp16 = tl_math.abs(tmp15)
tmp17 = float('inf')
tmp18 = tmp16 == tmp17
tmp19 = 0.0
tmp20 = tl.where(tmp18, tmp19, tmp15)
tmp21 = tmp6 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp8 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp11 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp14 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tmp32 = tl_math.log(tmp31)
tmp33 = tmp32 + tmp20
tmp34 = tmp3 - tmp33
tl.store(out_ptr0 + x0, tmp34, xmask)
@triton.jit
def triton_poi_fused_add_neg_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x5 = xindex // 4
x0 = xindex % 4
x6 = xindex // 16
x7 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x5, xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr5 + (x0 + 4 * x6), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr6 + (x0 + 4 * x6), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr7 + (x0 + 4 * x6), xmask, eviction_policy=
'evict_last')
tmp1 = -tmp0
tmp3 = 0.001
tmp4 = tmp2 * tmp3
tmp6 = 1e-08
tmp7 = tmp5 + tmp6
tmp8 = tl_math.log(tmp7)
tmp10 = tl_math.log(tmp9)
tmp12 = tl_math.abs(tmp11)
tmp13 = float('inf')
tmp14 = tmp12 == tmp13
tmp15 = 0.0
tmp16 = tl.where(tmp14, tmp15, tmp11)
tmp17 = tmp10 + tmp16
tmp18 = tmp8 - tmp17
tmp19 = tmp18 * tmp3
tmp20 = 1.0
tmp21 = tmp19 + tmp20
tmp22 = tmp4 + tmp21
tmp23 = tmp1 + tmp22
tmp26 = tmp25 + tmp6
tmp27 = tl_math.log(tmp26)
tmp29 = tmp27 - tmp28
tmp30 = tmp29 * tmp3
tmp31 = tmp30 + tmp20
tmp32 = tmp24 + tmp31
tmp33 = tmp23 + tmp32
tl.store(out_ptr0 + x7, tmp33, xmask)
@triton.jit
def triton_poi_fused_add_log_logsumexp_sub_8(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask)
tmp10 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask)
tmp1 = 1e-08
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp5 = 1000.0
tmp6 = tmp4 * tmp5
tmp8 = tmp7 * tmp5
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp10 * tmp5
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp5
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp16 = tl_math.abs(tmp15)
tmp17 = float('inf')
tmp18 = tmp16 == tmp17
tmp19 = 0.0
tmp20 = tl.where(tmp18, tmp19, tmp15)
tmp21 = tmp6 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp8 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp11 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp14 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tmp32 = tl_math.log(tmp31)
tmp33 = tmp32 + tmp20
tmp34 = tmp3 - tmp33
tl.store(out_ptr0 + x2, tmp34, xmask)
@triton.jit
def triton_poi_fused_add_neg_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x5 = xindex // 4
x0 = xindex % 4
x6 = xindex // 16
x7 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x5, xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr5 + (x0 + 4 * x6), xmask, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr6 + (x0 + 4 * x6), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr7 + (x0 + 4 * x6), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr8 + (x0 + 4 * x6), xmask, eviction_policy=
'evict_last')
tmp1 = -tmp0
tmp3 = 0.001
tmp4 = tmp2 * tmp3
tmp6 = 1e-08
tmp7 = tmp5 + tmp6
tmp8 = tl_math.log(tmp7)
tmp10 = tl_math.log(tmp9)
tmp12 = tl_math.abs(tmp11)
tmp13 = float('inf')
tmp14 = tmp12 == tmp13
tmp15 = 0.0
tmp16 = tl.where(tmp14, tmp15, tmp11)
tmp17 = tmp10 + tmp16
tmp18 = tmp8 - tmp17
tmp19 = tmp18 * tmp3
tmp20 = 1.0
tmp21 = tmp19 + tmp20
tmp22 = tmp4 + tmp21
tmp23 = tmp1 + tmp22
tmp25 = tmp24 * tmp3
tmp28 = tmp27 + tmp6
tmp29 = tl_math.log(tmp28)
tmp31 = tmp29 - tmp30
tmp32 = tmp31 * tmp3
tmp33 = tmp32 + tmp20
tmp34 = tmp26 + tmp33
tmp35 = tmp25 + tmp34
tmp36 = tmp23 + tmp35
tl.store(out_ptr0 + x7, tmp36, xmask)
@triton.jit
def triton_poi_fused_add_div_log_logsumexp_mul_ones_like_sub_10(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp37 = tl.load(in_ptr2 + x0, xmask)
tmp39 = tl.load(in_ptr3 + x0, xmask)
tmp41 = tl.load(in_ptr4 + x0, xmask)
tmp1 = 1e-08
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp5 = 1000.0
tmp6 = tmp4 * tmp5
tmp8 = tmp7 * tmp5
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp10 * tmp5
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp5
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp16 = tl_math.abs(tmp15)
tmp17 = float('inf')
tmp18 = tmp16 == tmp17
tmp19 = 0.0
tmp20 = tl.where(tmp18, tmp19, tmp15)
tmp21 = tmp6 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp8 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp11 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp14 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tmp32 = tl_math.log(tmp31)
tmp33 = tmp32 + tmp20
tmp34 = tmp3 - tmp33
tmp35 = 0.001
tmp36 = tmp34 * tmp35
tmp38 = tmp37 * tmp35
tmp40 = tl_math.log(tmp39)
tmp42 = tl_math.abs(tmp41)
tmp43 = tmp42 == tmp17
tmp44 = tl.where(tmp43, tmp19, tmp41)
tmp45 = tmp40 + tmp44
tmp46 = tmp3 - tmp45
tmp47 = tmp46 * tmp35
tmp48 = 1.0
tmp49 = tmp47 + tmp48
tmp50 = tmp38 + tmp49
tmp51 = tmp36 + tmp50
tl.store(in_out_ptr0 + x0, tmp51, xmask)
@triton.jit
def triton_poi_fused_add_div_neg_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x5 = xindex // 4
x0 = xindex % 4
x6 = xindex // 16
x7 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x0 + 4 * x6), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr3 + (x0 + 4 * x6), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr4 + (x0 + 4 * x6), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr5 + (x0 + 4 * x6), xmask, eviction_policy=
'evict_last')
tmp1 = -tmp0
tmp3 = tmp1 + tmp2
tmp5 = 0.001
tmp6 = tmp4 * tmp5
tmp9 = 1e-08
tmp10 = tmp8 + tmp9
tmp11 = tl_math.log(tmp10)
tmp13 = tmp11 - tmp12
tmp14 = tmp13 * tmp5
tmp15 = 1.0
tmp16 = tmp14 + tmp15
tmp17 = tmp7 + tmp16
tmp18 = tmp6 + tmp17
tmp19 = tmp3 + tmp18
tmp20 = 1000.0
tmp21 = tmp19 * tmp20
tl.store(out_ptr0 + x7, tmp21, xmask)
@triton.jit
def triton_poi_fused_add_log_logsumexp_mul_ones_like_sub_12(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask)
tmp9 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask)
tmp32 = tl.load(in_ptr2 + x2, xmask)
tmp34 = tl.load(in_ptr3 + x2, xmask)
tmp35 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = 1e-08
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp11 = tl_math.abs(tmp10)
tmp12 = float('inf')
tmp13 = tmp11 == tmp12
tmp14 = 0.0
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = tmp4 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp5 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp9 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tl_math.log(tmp26)
tmp28 = tmp27 + tmp15
tmp29 = tmp3 - tmp28
tmp30 = 0.001
tmp31 = tmp29 * tmp30
tmp33 = tmp32 * tmp30
tmp36 = tmp3 - tmp35
tmp37 = tmp36 * tmp30
tmp38 = 1.0
tmp39 = tmp37 + tmp38
tmp40 = tmp34 + tmp39
tmp41 = tmp33 + tmp40
tmp42 = tmp31 + tmp41
tl.store(in_out_ptr0 + x2, tmp42, xmask)
@triton.jit
def triton_poi_fused_add_div_logsumexp_neg_13(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x4 = xindex
x5 = xindex // 4
tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x4, xmask)
tmp4 = tl.load(in_ptr2 + 4 * x5, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (1 + 4 * x5), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr2 + (2 + 4 * x5), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr2 + (3 + 4 * x5), xmask, eviction_policy='evict_last'
)
tmp1 = -tmp0
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp6 = 1000.0
tmp7 = tmp5 * tmp6
tmp9 = -tmp8
tmp10 = tmp9 + tmp2
tmp12 = tmp10 + tmp11
tmp13 = tmp12 * tmp6
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp16 = -tmp15
tmp17 = tmp16 + tmp2
tmp19 = tmp17 + tmp18
tmp20 = tmp19 * tmp6
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp23 = -tmp22
tmp24 = tmp23 + tmp2
tmp26 = tmp24 + tmp25
tmp27 = tmp26 * tmp6
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tl_math.abs(tmp28)
tmp30 = float('inf')
tmp31 = tmp29 == tmp30
tmp32 = 0.0
tmp33 = tl.where(tmp31, tmp32, tmp28)
tmp34 = tmp7 - tmp33
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp13 - tmp33
tmp37 = tl_math.exp(tmp36)
tmp38 = tmp35 + tmp37
tmp39 = tmp20 - tmp33
tmp40 = tl_math.exp(tmp39)
tmp41 = tmp38 + tmp40
tmp42 = tmp27 - tmp33
tmp43 = tl_math.exp(tmp42)
tmp44 = tmp41 + tmp43
tl.store(out_ptr0 + x4, tmp28, xmask)
tl.store(out_ptr1 + x4, tmp44, xmask)
@triton.jit
def triton_poi_fused_add_div_neg_14(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x5 = xindex // 4
x0 = xindex % 4
x6 = xindex // 16
x7 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr4 + x5, xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr5 + (x0 + 4 * x6), xmask, eviction_policy=
'evict_last')
tmp1 = -tmp0
tmp3 = 1e-08
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp7 = tl_math.log(tmp6)
tmp9 = tl_math.abs(tmp8)
tmp10 = float('inf')
tmp11 = tmp9 == tmp10
tmp12 = 0.0
tmp13 = tl.where(tmp11, tmp12, tmp8)
tmp14 = tmp7 + tmp13
tmp15 = tmp5 - tmp14
tmp16 = 0.001
tmp17 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tmp20 = tmp1 + tmp19
tmp22 = tmp20 + tmp21
tmp23 = 1000.0
tmp24 = tmp22 * tmp23
tl.store(out_ptr0 + x7, tmp24, xmask)
@triton.jit
def triton_poi_fused_add_div_neg_15(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x5 = xindex // 4
x0 = xindex % 4
x6 = xindex // 16
x7 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr4 + x5, xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr5 + (x0 + 4 * x6), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr6 + (x0 + 4 * x6), xmask, eviction_policy=
'evict_last')
tmp1 = -tmp0
tmp3 = 1e-08
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp7 = tl_math.log(tmp6)
tmp9 = tl_math.abs(tmp8)
tmp10 = float('inf')
tmp11 = tmp9 == tmp10
tmp12 = 0.0
tmp13 = tl.where(tmp11, tmp12, tmp8)
tmp14 = tmp7 + tmp13
tmp15 = tmp5 - tmp14
tmp16 = 0.001
tmp17 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tmp20 = tmp1 + tmp19
tmp23 = tmp21 + tmp22
tmp24 = tmp20 + tmp23
tmp25 = 1000.0
tmp26 = tmp24 * tmp25
tl.store(out_ptr0 + x7, tmp26, xmask)
@triton.jit
def triton_poi_fused_add_log_logsumexp_mul_sub_16(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + x0, xmask)
tmp34 = tl.load(in_ptr3 + x0, xmask)
tmp41 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 1e-08
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp11 = tl_math.abs(tmp10)
tmp12 = float('inf')
tmp13 = tmp11 == tmp12
tmp14 = 0.0
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = tmp4 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp5 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp9 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tl_math.log(tmp26)
tmp28 = tmp27 + tmp15
tmp29 = tmp3 - tmp28
tmp30 = 0.001
tmp31 = tmp29 * tmp30
tmp33 = tl_math.log(tmp32)
tmp35 = tl_math.abs(tmp34)
tmp36 = tmp35 == tmp12
tmp37 = tl.where(tmp36, tmp14, tmp34)
tmp38 = tmp33 + tmp37
tmp39 = tmp3 - tmp38
tmp40 = tmp39 * tmp30
tmp42 = tmp40 + tmp41
tmp43 = tmp31 + tmp42
tl.store(in_out_ptr0 + x0, tmp43, xmask)
@triton.jit
def triton_poi_fused_add_log_logsumexp_mul_sub_17(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 16
x4 = xindex // 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr1 + 4 * x4, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x3, xmask)
tmp5 = tl.load(in_ptr3 + x3, xmask)
tmp10 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (1 + 4 * x4), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr1 + (2 + 4 * x4), xmask, eviction_policy='evict_last'
)
tmp24 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr1 + (3 + 4 * x4), xmask, eviction_policy='evict_last'
)
tmp47 = tl.load(in_ptr4 + x3, xmask)
tmp1 = -tmp0
tmp3 = tmp1 + tmp2
tmp6 = tmp4 + tmp5
tmp7 = tmp3 + tmp6
tmp8 = 1000.0
tmp9 = tmp7 * tmp8
tmp11 = -tmp10
tmp13 = tmp11 + tmp12
tmp14 = tmp13 + tmp6
tmp15 = tmp14 * tmp8
tmp16 = triton_helpers.maximum(tmp9, tmp15)
tmp18 = -tmp17
tmp20 = tmp18 + tmp19
tmp21 = tmp20 + tmp6
tmp22 = tmp21 * tmp8
tmp23 = triton_helpers.maximum(tmp16, tmp22)
tmp25 = -tmp24
tmp27 = tmp25 + tmp26
tmp28 = tmp27 + tmp6
tmp29 = tmp28 * tmp8
tmp30 = triton_helpers.maximum(tmp23, tmp29)
tmp31 = tl_math.abs(tmp30)
tmp32 = float('inf')
tmp33 = tmp31 == tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp33, tmp34, tmp30)
tmp36 = tmp9 - tmp35
tmp37 = tl_math.exp(tmp36)
tmp38 = tmp15 - tmp35
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp37 + tmp39
tmp41 = tmp22 - tmp35
tmp42 = tl_math.exp(tmp41)
tmp43 = tmp40 + tmp42
tmp44 = tmp29 - tmp35
tmp45 = tl_math.exp(tmp44)
tmp46 = tmp43 + tmp45
tmp48 = 1e-08
tmp49 = tmp47 + tmp48
tmp50 = tl_math.log(tmp49)
tmp51 = tl_math.log(tmp46)
tmp52 = tmp51 + tmp35
tmp53 = tmp50 - tmp52
tmp54 = 0.001
tmp55 = tmp53 * tmp54
tmp56 = tmp55 + tmp6
tl.store(in_out_ptr0 + x3, tmp56, xmask)
@triton.jit
def triton_poi_fused_add_log_logsumexp_mul_sub_18(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + x0, xmask)
tmp34 = tl.load(in_ptr3 + x0, xmask)
tmp41 = tl.load(in_ptr4 + x0, xmask)
tmp1 = 1e-08
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp11 = tl_math.abs(tmp10)
tmp12 = float('inf')
tmp13 = tmp11 == tmp12
tmp14 = 0.0
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = tmp4 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp5 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp9 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tl_math.log(tmp26)
tmp28 = tmp27 + tmp15
tmp29 = tmp3 - tmp28
tmp30 = 0.001
tmp31 = tmp29 * tmp30
tmp33 = tl_math.log(tmp32)
tmp35 = tl_math.abs(tmp34)
tmp36 = tmp35 == tmp12
tmp37 = tl.where(tmp36, tmp14, tmp34)
tmp38 = tmp33 + tmp37
tmp39 = tmp3 - tmp38
tmp40 = tmp39 * tmp30
tmp42 = tmp40 + tmp41
tmp43 = tmp31 + tmp42
tl.store(in_out_ptr0 + x0, tmp43, xmask)
@triton.jit
def triton_per_fused_add_div_exp_mul_neg_sum_19(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r4 = rindex
x0 = xindex % 16
r3 = rindex // 4
x5 = xindex
r2 = rindex % 4
tmp0 = tl.load(in_ptr0 + (r4 + 16 * x0), xmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tl.load(in_ptr1 + (r3 + 4 * x5), xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr2 + (r3 + 4 * x5), xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tl.load(in_ptr3 + (r3 + 4 * x5), xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tl.load(in_ptr4 + (r3 + 4 * x5), xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tl.load(in_ptr5 + (r2 + 4 * x5), xmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = -tmp0
tmp3 = 1e-08
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp7 = tl_math.log(tmp6)
tmp9 = tl_math.abs(tmp8)
tmp10 = float('inf')
tmp11 = tmp9 == tmp10
tmp12 = 0.0
tmp13 = tl.where(tmp11, tmp12, tmp8)
tmp14 = tmp7 + tmp13
tmp15 = tmp5 - tmp14
tmp16 = 0.001
tmp17 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tmp20 = tmp1 + tmp19
tmp22 = tmp20 + tmp21
tmp23 = 1000.0
tmp24 = tmp22 * tmp23
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp25 * tmp0
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.where(xmask, tmp27, 0)
tmp30 = tl.sum(tmp29, 1)[:, None]
tl.store(out_ptr0 + (r4 + 16 * x5), tmp25, xmask)
tl.store(out_ptr1 + x5, tmp30, xmask)
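# call() below drives the compiled graph: each Sinkhorn iteration lowers to a
# fixed sequence of fused u/v update launches (the repeated *_13/*_14/*_4/
# *_15/*_16/*_17 pattern), unrolled for the module's default max_iter=100, and
# it ends with the fused exp/mul/sum reduction that materializes the transport
# plan pi and the per-batch cost.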
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 1, 4),
torch.float32)
get_raw_stream(0)
triton_poi_fused_logsumexp_0[grid(1024)](arg2_1, buf0, 1024, XBLOCK
=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_logsumexp_1[grid(256)](buf0, arg2_1, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256),
torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_logsumexp_neg_2[grid(256)](arg2_1, arg1_1,
buf1, buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
)
del buf0
triton_poi_fused_add_div_neg_3[grid(1024)](arg2_1, arg0_1, buf3,
buf2, arg1_1, buf1, buf4, 1024, XBLOCK=256, num_warps=4,
num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused_add_neg_5[grid(1024)](arg2_1, arg0_1, buf3, buf2,
buf5, arg1_1, buf1, buf6, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_log_logsumexp_sub_6[grid(256)](arg0_1,
buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused_add_neg_7[grid(1024)](arg2_1, buf7, arg0_1, buf3,
buf2, buf5, arg1_1, buf1, buf8, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_log_logsumexp_sub_8[grid(256)](arg1_1, buf8,
buf9, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf10 = buf8
del buf8
triton_poi_fused_add_neg_9[grid(1024)](arg2_1, buf7, arg0_1, buf3,
buf2, buf9, buf5, arg1_1, buf1, buf10, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = buf11
del buf11
triton_poi_fused_add_div_log_logsumexp_mul_ones_like_sub_10[grid(256)](
buf12, arg0_1, buf10, buf7, buf3, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf2
buf13 = buf10
del buf10
triton_poi_fused_add_div_neg_11[grid(1024)](arg2_1, buf12, buf9,
buf5, arg1_1, buf1, buf13, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf15 = buf1
del buf1
triton_poi_fused_add_log_logsumexp_mul_ones_like_sub_12[grid(256)](
buf15, arg1_1, buf13, buf9, buf5, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf16 = reinterpret_tensor(buf9, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf9
buf17 = buf5
del buf5
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf12,
buf15, buf16, buf17, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf18 = buf13
del buf13
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf17,
buf16, buf12, buf15, buf18, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf19 = buf7
del buf7
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf18, buf19, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf20 = buf18
del buf18
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf17,
buf16, buf12, buf19, buf15, buf20, 1024, XBLOCK=256, num_warps=
4, num_stages=1)
buf22 = buf12
del buf12
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf22,
arg0_1, buf20, buf17, buf16, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf24 = buf17
del buf17
buf25 = buf24
del buf24
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf25,
arg2_1, buf22, buf19, buf15, arg1_1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf26 = reinterpret_tensor(buf19, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf19
buf27 = buf15
del buf15
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf22,
buf25, buf26, buf27, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf28 = buf20
del buf20
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf27,
buf26, buf22, buf25, buf28, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf29 = reinterpret_tensor(buf16, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf16
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf28, buf29, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf30 = buf28
del buf28
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf27,
buf26, buf22, buf29, buf25, buf30, 1024, XBLOCK=256, num_warps=
4, num_stages=1)
buf32 = buf22
del buf22
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf32,
arg0_1, buf30, buf27, buf26, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf34 = buf27
del buf27
buf35 = buf34
del buf34
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf35,
arg2_1, buf32, buf29, buf25, arg1_1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf36 = reinterpret_tensor(buf29, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf29
buf37 = buf25
del buf25
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf32,
buf35, buf36, buf37, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf38 = buf30
del buf30
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf37,
buf36, buf32, buf35, buf38, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf39 = reinterpret_tensor(buf26, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf26
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf38, buf39, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf40 = buf38
del buf38
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf37,
buf36, buf32, buf39, buf35, buf40, 1024, XBLOCK=256, num_warps=
4, num_stages=1)
buf42 = buf32
del buf32
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf42,
arg0_1, buf40, buf37, buf36, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf44 = buf37
del buf37
buf45 = buf44
del buf44
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf45,
arg2_1, buf42, buf39, buf35, arg1_1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf46 = reinterpret_tensor(buf39, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf39
buf47 = buf35
del buf35
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf42,
buf45, buf46, buf47, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf48 = buf40
del buf40
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf47,
buf46, buf42, buf45, buf48, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf49 = reinterpret_tensor(buf36, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf36
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf48, buf49, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf50 = buf48
del buf48
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf47,
buf46, buf42, buf49, buf45, buf50, 1024, XBLOCK=256, num_warps=
4, num_stages=1)
buf52 = buf42
del buf42
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf52,
arg0_1, buf50, buf47, buf46, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf54 = buf47
del buf47
buf55 = buf54
del buf54
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf55,
arg2_1, buf52, buf49, buf45, arg1_1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf56 = reinterpret_tensor(buf49, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf49
buf57 = buf45
del buf45
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf52,
buf55, buf56, buf57, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf58 = buf50
del buf50
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf57,
buf56, buf52, buf55, buf58, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf59 = reinterpret_tensor(buf46, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf46
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf58, buf59, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf60 = buf58
del buf58
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf57,
buf56, buf52, buf59, buf55, buf60, 1024, XBLOCK=256, num_warps=
4, num_stages=1)
buf62 = buf52
del buf52
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf62,
arg0_1, buf60, buf57, buf56, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf64 = buf57
del buf57
buf65 = buf64
del buf64
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf65,
arg2_1, buf62, buf59, buf55, arg1_1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf66 = reinterpret_tensor(buf59, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf59
buf67 = buf55
del buf55
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf62,
buf65, buf66, buf67, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf68 = buf60
del buf60
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf67,
buf66, buf62, buf65, buf68, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf69 = reinterpret_tensor(buf56, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf56
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf68, buf69, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf70 = buf68
del buf68
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf67,
buf66, buf62, buf69, buf65, buf70, 1024, XBLOCK=256, num_warps=
4, num_stages=1)
buf72 = buf62
del buf62
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf72,
arg0_1, buf70, buf67, buf66, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf74 = buf67
del buf67
buf75 = buf74
del buf74
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf75,
arg2_1, buf72, buf69, buf65, arg1_1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf76 = reinterpret_tensor(buf69, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf69
buf77 = buf65
del buf65
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf72,
buf75, buf76, buf77, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf78 = buf70
del buf70
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf77,
buf76, buf72, buf75, buf78, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf79 = reinterpret_tensor(buf66, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf66
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf78, buf79, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf80 = buf78
del buf78
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf77,
buf76, buf72, buf79, buf75, buf80, 1024, XBLOCK=256, num_warps=
4, num_stages=1)
buf82 = buf72
del buf72
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf82,
arg0_1, buf80, buf77, buf76, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf84 = buf77
del buf77
buf85 = buf84
del buf84
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf85,
arg2_1, buf82, buf79, buf75, arg1_1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf86 = reinterpret_tensor(buf79, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf79
buf87 = buf75
del buf75
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf82,
buf85, buf86, buf87, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf88 = buf80
del buf80
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf87,
buf86, buf82, buf85, buf88, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf89 = reinterpret_tensor(buf76, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf76
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf88, buf89, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf90 = buf88
del buf88
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf87,
buf86, buf82, buf89, buf85, buf90, 1024, XBLOCK=256, num_warps=
4, num_stages=1)
buf92 = buf82
del buf82
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf92,
arg0_1, buf90, buf87, buf86, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf94 = buf87
del buf87
buf95 = buf94
del buf94
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf95,
arg2_1, buf92, buf89, buf85, arg1_1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf96 = reinterpret_tensor(buf89, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf89
buf97 = buf85
del buf85
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf92,
buf95, buf96, buf97, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf98 = buf90
del buf90
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf97,
buf96, buf92, buf95, buf98, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf99 = reinterpret_tensor(buf86, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf86
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf98, buf99, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf100 = buf98
del buf98
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf97,
buf96, buf92, buf99, buf95, buf100, 1024, XBLOCK=256, num_warps
=4, num_stages=1)
buf101 = buf3
del buf3
buf102 = buf101
del buf101
triton_poi_fused_add_log_logsumexp_mul_sub_18[grid(256)](buf102,
arg0_1, buf100, buf97, buf96, buf92, 256, XBLOCK=128, num_warps
=4, num_stages=1)
del buf92
buf104 = buf97
del buf97
buf105 = buf104
del buf104
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf105,
arg2_1, buf102, buf99, buf95, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf106 = reinterpret_tensor(buf99, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf99
buf107 = buf95
del buf95
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf102,
buf105, buf106, buf107, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf108 = buf100
del buf100
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf107,
buf106, buf102, buf105, buf108, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf109 = reinterpret_tensor(buf96, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf96
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf108, buf109, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf110 = buf108
del buf108
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf107,
buf106, buf102, buf109, buf105, buf110, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf112 = buf102
del buf102
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf112,
arg0_1, buf110, buf107, buf106, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf114 = buf107
del buf107
buf115 = buf114
del buf114
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf115,
arg2_1, buf112, buf109, buf105, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf116 = reinterpret_tensor(buf109, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf109
buf117 = buf105
del buf105
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf112,
buf115, buf116, buf117, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf118 = buf110
del buf110
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf117,
buf116, buf112, buf115, buf118, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf119 = reinterpret_tensor(buf106, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf106
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf118, buf119, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf120 = buf118
del buf118
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf117,
buf116, buf112, buf119, buf115, buf120, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf122 = buf112
del buf112
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf122,
arg0_1, buf120, buf117, buf116, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf124 = buf117
del buf117
buf125 = buf124
del buf124
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf125,
arg2_1, buf122, buf119, buf115, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf126 = reinterpret_tensor(buf119, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf119
buf127 = buf115
del buf115
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf122,
buf125, buf126, buf127, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf128 = buf120
del buf120
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf127,
buf126, buf122, buf125, buf128, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf129 = reinterpret_tensor(buf116, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf116
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf128, buf129, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf130 = buf128
del buf128
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf127,
buf126, buf122, buf129, buf125, buf130, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf132 = buf122
del buf122
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf132,
arg0_1, buf130, buf127, buf126, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf134 = buf127
del buf127
buf135 = buf134
del buf134
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf135,
arg2_1, buf132, buf129, buf125, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf136 = reinterpret_tensor(buf129, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf129
buf137 = buf125
del buf125
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf132,
buf135, buf136, buf137, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf138 = buf130
del buf130
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf137,
buf136, buf132, buf135, buf138, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf139 = reinterpret_tensor(buf126, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf126
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf138, buf139, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf140 = buf138
del buf138
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf137,
buf136, buf132, buf139, buf135, buf140, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf142 = buf132
del buf132
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf142,
arg0_1, buf140, buf137, buf136, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf144 = buf137
del buf137
buf145 = buf144
del buf144
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf145,
arg2_1, buf142, buf139, buf135, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf146 = reinterpret_tensor(buf139, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf139
buf147 = buf135
del buf135
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf142,
buf145, buf146, buf147, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf148 = buf140
del buf140
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf147,
buf146, buf142, buf145, buf148, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf149 = reinterpret_tensor(buf136, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf136
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf148, buf149, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf150 = buf148
del buf148
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf147,
buf146, buf142, buf149, buf145, buf150, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf152 = buf142
del buf142
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf152,
arg0_1, buf150, buf147, buf146, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf154 = buf147
del buf147
buf155 = buf154
del buf154
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf155,
arg2_1, buf152, buf149, buf145, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf156 = reinterpret_tensor(buf149, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf149
buf157 = buf145
del buf145
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf152,
buf155, buf156, buf157, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf158 = buf150
del buf150
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf157,
buf156, buf152, buf155, buf158, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf159 = reinterpret_tensor(buf146, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf146
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf158, buf159, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf160 = buf158
del buf158
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf157,
buf156, buf152, buf159, buf155, buf160, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf162 = buf152
del buf152
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf162,
arg0_1, buf160, buf157, buf156, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf164 = buf157
del buf157
buf165 = buf164
del buf164
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf165,
arg2_1, buf162, buf159, buf155, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf166 = reinterpret_tensor(buf159, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf159
buf167 = buf155
del buf155
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf162,
buf165, buf166, buf167, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf168 = buf160
del buf160
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf167,
buf166, buf162, buf165, buf168, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf169 = reinterpret_tensor(buf156, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf156
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf168, buf169, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf170 = buf168
del buf168
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf167,
buf166, buf162, buf169, buf165, buf170, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf172 = buf162
del buf162
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf172,
arg0_1, buf170, buf167, buf166, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf174 = buf167
del buf167
buf175 = buf174
del buf174
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf175,
arg2_1, buf172, buf169, buf165, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf176 = reinterpret_tensor(buf169, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf169
buf177 = buf165
del buf165
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf172,
buf175, buf176, buf177, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf178 = buf170
del buf170
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf177,
buf176, buf172, buf175, buf178, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf179 = reinterpret_tensor(buf166, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf166
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf178, buf179, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf180 = buf178
del buf178
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf177,
buf176, buf172, buf179, buf175, buf180, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf182 = buf172
del buf172
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf182,
arg0_1, buf180, buf177, buf176, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf184 = buf177
del buf177
buf185 = buf184
del buf184
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf185,
arg2_1, buf182, buf179, buf175, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf186 = reinterpret_tensor(buf179, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf179
buf187 = buf175
del buf175
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf182,
buf185, buf186, buf187, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf188 = buf180
del buf180
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf187,
buf186, buf182, buf185, buf188, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf189 = reinterpret_tensor(buf176, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf176
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf188, buf189, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf190 = buf188
del buf188
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf187,
buf186, buf182, buf189, buf185, buf190, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf192 = buf182
del buf182
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf192,
arg0_1, buf190, buf187, buf186, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf194 = buf187
del buf187
buf195 = buf194
del buf194
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf195,
arg2_1, buf192, buf189, buf185, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf196 = reinterpret_tensor(buf189, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf189
buf197 = buf185
del buf185
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf192,
buf195, buf196, buf197, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf198 = buf190
del buf190
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf197,
buf196, buf192, buf195, buf198, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf199 = reinterpret_tensor(buf186, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf186
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf198, buf199, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf200 = buf198
del buf198
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf197,
buf196, buf192, buf199, buf195, buf200, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf202 = buf192
del buf192
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf202,
arg0_1, buf200, buf197, buf196, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf204 = buf197
del buf197
buf205 = buf204
del buf204
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf205,
arg2_1, buf202, buf199, buf195, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf206 = reinterpret_tensor(buf199, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf199
buf207 = buf195
del buf195
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf202,
buf205, buf206, buf207, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf208 = buf200
del buf200
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf207,
buf206, buf202, buf205, buf208, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf209 = reinterpret_tensor(buf196, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf196
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf208, buf209, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf210 = buf208
del buf208
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf207,
buf206, buf202, buf209, buf205, buf210, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf212 = buf202
del buf202
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf212,
arg0_1, buf210, buf207, buf206, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf214 = buf207
del buf207
buf215 = buf214
del buf214
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf215,
arg2_1, buf212, buf209, buf205, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf216 = reinterpret_tensor(buf209, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf209
buf217 = buf205
del buf205
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf212,
buf215, buf216, buf217, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf218 = buf210
del buf210
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf217,
buf216, buf212, buf215, buf218, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf219 = reinterpret_tensor(buf206, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf206
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf218, buf219, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf220 = buf218
del buf218
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf217,
buf216, buf212, buf219, buf215, buf220, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf222 = buf212
del buf212
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf222,
arg0_1, buf220, buf217, buf216, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf224 = buf217
del buf217
buf225 = buf224
del buf224
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf225,
arg2_1, buf222, buf219, buf215, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf226 = reinterpret_tensor(buf219, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf219
buf227 = buf215
del buf215
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf222,
buf225, buf226, buf227, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf228 = buf220
del buf220
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf227,
buf226, buf222, buf225, buf228, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf229 = reinterpret_tensor(buf216, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf216
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf228, buf229, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf230 = buf228
del buf228
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf227,
buf226, buf222, buf229, buf225, buf230, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf232 = buf222
del buf222
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf232,
arg0_1, buf230, buf227, buf226, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf234 = buf227
del buf227
buf235 = buf234
del buf234
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf235,
arg2_1, buf232, buf229, buf225, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf236 = reinterpret_tensor(buf229, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf229
buf237 = buf225
del buf225
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf232,
buf235, buf236, buf237, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf238 = buf230
del buf230
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf237,
buf236, buf232, buf235, buf238, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf239 = reinterpret_tensor(buf226, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf226
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf238, buf239, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf240 = buf238
del buf238
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf237,
buf236, buf232, buf239, buf235, buf240, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf242 = buf232
del buf232
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf242,
arg0_1, buf240, buf237, buf236, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf244 = buf237
del buf237
buf245 = buf244
del buf244
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf245,
arg2_1, buf242, buf239, buf235, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf246 = reinterpret_tensor(buf239, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf239
buf247 = buf235
del buf235
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf242,
buf245, buf246, buf247, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf248 = buf240
del buf240
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf247,
buf246, buf242, buf245, buf248, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf249 = reinterpret_tensor(buf236, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf236
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf248, buf249, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf250 = buf248
del buf248
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf247,
buf246, buf242, buf249, buf245, buf250, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf252 = buf242
del buf242
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf252,
arg0_1, buf250, buf247, buf246, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf254 = buf247
del buf247
buf255 = buf254
del buf254
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf255,
arg2_1, buf252, buf249, buf245, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf256 = reinterpret_tensor(buf249, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf249
buf257 = buf245
del buf245
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf252,
buf255, buf256, buf257, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf258 = buf250
del buf250
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf257,
buf256, buf252, buf255, buf258, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf259 = reinterpret_tensor(buf246, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf246
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf258, buf259, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf260 = buf258
del buf258
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf257,
buf256, buf252, buf259, buf255, buf260, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf262 = buf252
del buf252
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf262,
arg0_1, buf260, buf257, buf256, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf264 = buf257
del buf257
buf265 = buf264
del buf264
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf265,
arg2_1, buf262, buf259, buf255, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf266 = reinterpret_tensor(buf259, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf259
buf267 = buf255
del buf255
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf262,
buf265, buf266, buf267, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf268 = buf260
del buf260
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf267,
buf266, buf262, buf265, buf268, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf269 = reinterpret_tensor(buf256, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf256
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf268, buf269, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf270 = buf268
del buf268
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf267,
buf266, buf262, buf269, buf265, buf270, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf272 = buf262
del buf262
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf272,
arg0_1, buf270, buf267, buf266, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf274 = buf267
del buf267
buf275 = buf274
del buf274
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf275,
arg2_1, buf272, buf269, buf265, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf276 = reinterpret_tensor(buf269, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf269
buf277 = buf265
del buf265
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf272,
buf275, buf276, buf277, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf278 = buf270
del buf270
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf277,
buf276, buf272, buf275, buf278, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf279 = reinterpret_tensor(buf266, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf266
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf278, buf279, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf280 = buf278
del buf278
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf277,
buf276, buf272, buf279, buf275, buf280, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf282 = buf272
del buf272
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf282,
arg0_1, buf280, buf277, buf276, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf284 = buf277
del buf277
buf285 = buf284
del buf284
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf285,
arg2_1, buf282, buf279, buf275, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf286 = reinterpret_tensor(buf279, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf279
buf287 = buf275
del buf275
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf282,
buf285, buf286, buf287, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf288 = buf280
del buf280
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf287,
buf286, buf282, buf285, buf288, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf289 = reinterpret_tensor(buf276, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf276
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf288, buf289, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf290 = buf288
del buf288
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf287,
buf286, buf282, buf289, buf285, buf290, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf292 = buf282
del buf282
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf292,
arg0_1, buf290, buf287, buf286, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf294 = buf287
del buf287
buf295 = buf294
del buf294
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf295,
arg2_1, buf292, buf289, buf285, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf296 = reinterpret_tensor(buf289, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf289
buf297 = buf285
del buf285
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf292,
buf295, buf296, buf297, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf298 = buf290
del buf290
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf297,
buf296, buf292, buf295, buf298, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf299 = reinterpret_tensor(buf286, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf286
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf298, buf299, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf300 = buf298
del buf298
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf297,
buf296, buf292, buf299, buf295, buf300, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf302 = buf292
del buf292
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf302,
arg0_1, buf300, buf297, buf296, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf304 = buf297
del buf297
buf305 = buf304
del buf304
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf305,
arg2_1, buf302, buf299, buf295, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf306 = reinterpret_tensor(buf299, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf299
buf307 = buf295
del buf295
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf302,
buf305, buf306, buf307, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf308 = buf300
del buf300
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf307,
buf306, buf302, buf305, buf308, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf309 = reinterpret_tensor(buf296, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf296
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf308, buf309, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf310 = buf308
del buf308
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf307,
buf306, buf302, buf309, buf305, buf310, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf312 = buf302
del buf302
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf312,
arg0_1, buf310, buf307, buf306, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf314 = buf307
del buf307
buf315 = buf314
del buf314
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf315,
arg2_1, buf312, buf309, buf305, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf316 = reinterpret_tensor(buf309, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf309
buf317 = buf305
del buf305
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf312,
buf315, buf316, buf317, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf318 = buf310
del buf310
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf317,
buf316, buf312, buf315, buf318, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf319 = reinterpret_tensor(buf306, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf306
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf318, buf319, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf320 = buf318
del buf318
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf317,
buf316, buf312, buf319, buf315, buf320, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf322 = buf312
del buf312
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf322,
arg0_1, buf320, buf317, buf316, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf324 = buf317
del buf317
buf325 = buf324
del buf324
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf325,
arg2_1, buf322, buf319, buf315, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf326 = reinterpret_tensor(buf319, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf319
buf327 = buf315
del buf315
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf322,
buf325, buf326, buf327, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf328 = buf320
del buf320
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf327,
buf326, buf322, buf325, buf328, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf329 = reinterpret_tensor(buf316, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf316
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf328, buf329, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf330 = buf328
del buf328
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf327,
buf326, buf322, buf329, buf325, buf330, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf332 = buf322
del buf322
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf332,
arg0_1, buf330, buf327, buf326, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf334 = buf327
del buf327
buf335 = buf334
del buf334
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf335,
arg2_1, buf332, buf329, buf325, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf336 = reinterpret_tensor(buf329, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf329
buf337 = buf325
del buf325
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf332,
buf335, buf336, buf337, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf338 = buf330
del buf330
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf337,
buf336, buf332, buf335, buf338, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf339 = reinterpret_tensor(buf326, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf326
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf338, buf339, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf340 = buf338
del buf338
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf337,
buf336, buf332, buf339, buf335, buf340, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf342 = buf332
del buf332
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf342,
arg0_1, buf340, buf337, buf336, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf344 = buf337
del buf337
buf345 = buf344
del buf344
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf345,
arg2_1, buf342, buf339, buf335, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf346 = reinterpret_tensor(buf339, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf339
buf347 = buf335
del buf335
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf342,
buf345, buf346, buf347, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf348 = buf340
del buf340
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf347,
buf346, buf342, buf345, buf348, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf349 = reinterpret_tensor(buf336, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf336
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf348, buf349, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf350 = buf348
del buf348
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf347,
buf346, buf342, buf349, buf345, buf350, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf352 = buf342
del buf342
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf352,
arg0_1, buf350, buf347, buf346, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf354 = buf347
del buf347
buf355 = buf354
del buf354
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf355,
arg2_1, buf352, buf349, buf345, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf356 = reinterpret_tensor(buf349, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf349
buf357 = buf345
del buf345
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf352,
buf355, buf356, buf357, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf358 = buf350
del buf350
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf357,
buf356, buf352, buf355, buf358, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf359 = reinterpret_tensor(buf346, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf346
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf358, buf359, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf360 = buf358
del buf358
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf357,
buf356, buf352, buf359, buf355, buf360, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf362 = buf352
del buf352
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf362,
arg0_1, buf360, buf357, buf356, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf364 = buf357
del buf357
buf365 = buf364
del buf364
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf365,
arg2_1, buf362, buf359, buf355, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf366 = reinterpret_tensor(buf359, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf359
buf367 = buf355
del buf355
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf362,
buf365, buf366, buf367, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf368 = buf360
del buf360
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf367,
buf366, buf362, buf365, buf368, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf369 = reinterpret_tensor(buf356, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf356
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf368, buf369, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf370 = buf368
del buf368
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf367,
buf366, buf362, buf369, buf365, buf370, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf372 = buf362
del buf362
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf372,
arg0_1, buf370, buf367, buf366, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf374 = buf367
del buf367
buf375 = buf374
del buf374
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf375,
arg2_1, buf372, buf369, buf365, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf376 = reinterpret_tensor(buf369, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf369
buf377 = buf365
del buf365
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf372,
buf375, buf376, buf377, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf378 = buf370
del buf370
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf377,
buf376, buf372, buf375, buf378, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf379 = reinterpret_tensor(buf366, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf366
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf378, buf379, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf380 = buf378
del buf378
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf377,
buf376, buf372, buf379, buf375, buf380, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf382 = buf372
del buf372
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf382,
arg0_1, buf380, buf377, buf376, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf384 = buf377
del buf377
buf385 = buf384
del buf384
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf385,
arg2_1, buf382, buf379, buf375, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf386 = reinterpret_tensor(buf379, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf379
buf387 = buf375
del buf375
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf382,
buf385, buf386, buf387, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf388 = buf380
del buf380
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf387,
buf386, buf382, buf385, buf388, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf389 = reinterpret_tensor(buf376, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf376
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf388, buf389, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf390 = buf388
del buf388
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf387,
buf386, buf382, buf389, buf385, buf390, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf392 = buf382
del buf382
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf392,
arg0_1, buf390, buf387, buf386, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf394 = buf387
del buf387
buf395 = buf394
del buf394
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf395,
arg2_1, buf392, buf389, buf385, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf396 = reinterpret_tensor(buf389, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf389
buf397 = buf385
del buf385
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf392,
buf395, buf396, buf397, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf398 = buf390
del buf390
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf397,
buf396, buf392, buf395, buf398, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf399 = reinterpret_tensor(buf386, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf386
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf398, buf399, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf400 = buf398
del buf398
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf397,
buf396, buf392, buf399, buf395, buf400, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf402 = buf392
del buf392
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf402,
arg0_1, buf400, buf397, buf396, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf404 = buf397
del buf397
buf405 = buf404
del buf404
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf405,
arg2_1, buf402, buf399, buf395, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf406 = reinterpret_tensor(buf399, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf399
buf407 = buf395
del buf395
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf402,
buf405, buf406, buf407, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf408 = buf400
del buf400
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf407,
buf406, buf402, buf405, buf408, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf409 = reinterpret_tensor(buf396, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf396
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf408, buf409, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf410 = buf408
del buf408
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf407,
buf406, buf402, buf409, buf405, buf410, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf412 = buf402
del buf402
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf412,
arg0_1, buf410, buf407, buf406, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf414 = buf407
del buf407
buf415 = buf414
del buf414
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf415,
arg2_1, buf412, buf409, buf405, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf416 = reinterpret_tensor(buf409, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf409
buf417 = buf405
del buf405
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf412,
buf415, buf416, buf417, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf418 = buf410
del buf410
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf417,
buf416, buf412, buf415, buf418, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf419 = reinterpret_tensor(buf406, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf406
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf418, buf419, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf420 = buf418
del buf418
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf417,
buf416, buf412, buf419, buf415, buf420, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf422 = buf412
del buf412
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf422,
arg0_1, buf420, buf417, buf416, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf424 = buf417
del buf417
buf425 = buf424
del buf424
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf425,
arg2_1, buf422, buf419, buf415, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf426 = reinterpret_tensor(buf419, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf419
buf427 = buf415
del buf415
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf422,
buf425, buf426, buf427, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf428 = buf420
del buf420
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf427,
buf426, buf422, buf425, buf428, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf429 = reinterpret_tensor(buf416, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf416
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf428, buf429, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf430 = buf428
del buf428
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf427,
buf426, buf422, buf429, buf425, buf430, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf432 = buf422
del buf422
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf432,
arg0_1, buf430, buf427, buf426, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf434 = buf427
del buf427
buf435 = buf434
del buf434
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf435,
arg2_1, buf432, buf429, buf425, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf436 = reinterpret_tensor(buf429, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf429
buf437 = buf425
del buf425
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf432,
buf435, buf436, buf437, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf438 = buf430
del buf430
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf437,
buf436, buf432, buf435, buf438, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf439 = reinterpret_tensor(buf426, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf426
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf438, buf439, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf440 = buf438
del buf438
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf437,
buf436, buf432, buf439, buf435, buf440, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf442 = buf432
del buf432
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf442,
arg0_1, buf440, buf437, buf436, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf444 = buf437
del buf437
buf445 = buf444
del buf444
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf445,
arg2_1, buf442, buf439, buf435, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf446 = reinterpret_tensor(buf439, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf439
buf447 = buf435
del buf435
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf442,
buf445, buf446, buf447, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf448 = buf440
del buf440
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf447,
buf446, buf442, buf445, buf448, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf449 = reinterpret_tensor(buf436, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf436
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf448, buf449, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf450 = buf448
del buf448
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf447,
buf446, buf442, buf449, buf445, buf450, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf452 = buf442
del buf442
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf452,
arg0_1, buf450, buf447, buf446, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf454 = buf447
del buf447
buf455 = buf454
del buf454
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf455,
arg2_1, buf452, buf449, buf445, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf456 = reinterpret_tensor(buf449, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf449
buf457 = buf445
del buf445
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf452,
buf455, buf456, buf457, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf458 = buf450
del buf450
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf457,
buf456, buf452, buf455, buf458, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf459 = reinterpret_tensor(buf446, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf446
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf458, buf459, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf460 = buf458
del buf458
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf457,
buf456, buf452, buf459, buf455, buf460, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf462 = buf452
del buf452
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf462,
arg0_1, buf460, buf457, buf456, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf464 = buf457
del buf457
buf465 = buf464
del buf464
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf465,
arg2_1, buf462, buf459, buf455, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf466 = reinterpret_tensor(buf459, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf459
buf467 = buf455
del buf455
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf462,
buf465, buf466, buf467, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf468 = buf460
del buf460
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf467,
buf466, buf462, buf465, buf468, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf469 = reinterpret_tensor(buf456, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf456
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf468, buf469, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf470 = buf468
del buf468
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf467,
buf466, buf462, buf469, buf465, buf470, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf472 = buf462
del buf462
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf472,
arg0_1, buf470, buf467, buf466, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf474 = buf467
del buf467
buf475 = buf474
del buf474
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf475,
arg2_1, buf472, buf469, buf465, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf476 = reinterpret_tensor(buf469, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf469
buf477 = buf465
del buf465
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf472,
buf475, buf476, buf477, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf478 = buf470
del buf470
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf477,
buf476, buf472, buf475, buf478, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf479 = reinterpret_tensor(buf466, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf466
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf478, buf479, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf480 = buf478
del buf478
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf477,
buf476, buf472, buf479, buf475, buf480, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf482 = buf472
del buf472
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf482,
arg0_1, buf480, buf477, buf476, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf484 = buf477
del buf477
buf485 = buf484
del buf484
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf485,
arg2_1, buf482, buf479, buf475, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf486 = reinterpret_tensor(buf479, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf479
buf487 = buf475
del buf475
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf482,
buf485, buf486, buf487, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf488 = buf480
del buf480
triton_poi_fused_add_div_neg_14[grid(1024)](arg2_1, arg0_1, buf487,
buf486, buf482, buf485, buf488, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
buf489 = reinterpret_tensor(buf476, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf476
triton_poi_fused_add_log_logsumexp_mul_sub_4[grid(256)](arg1_1,
buf488, buf489, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf490 = buf488
del buf488
triton_poi_fused_add_div_neg_15[grid(1024)](arg2_1, arg0_1, buf487,
buf486, buf482, buf489, buf485, buf490, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf492 = buf482
del buf482
triton_poi_fused_add_log_logsumexp_mul_sub_16[grid(256)](buf492,
arg0_1, buf490, buf487, buf486, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf486
buf494 = buf487
del buf487
buf495 = buf494
del buf494
triton_poi_fused_add_log_logsumexp_mul_sub_17[grid(256)](buf495,
arg2_1, buf492, buf489, buf485, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg1_1
buf496 = reinterpret_tensor(buf489, (4, 4, 4, 4, 1), (64, 16, 4, 1,
256), 0)
del buf489
buf497 = buf485
del buf485
triton_poi_fused_add_div_logsumexp_neg_13[grid(256)](arg2_1, buf492,
buf495, buf496, buf497, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf498 = buf490
del buf490
buf499 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_per_fused_add_div_exp_mul_neg_sum_19[grid(64)](arg2_1,
arg0_1, buf497, buf496, buf492, buf495, buf498, buf499, 64, 16,
XBLOCK=8, num_warps=2, num_stages=1)
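    # Final reduction: judging by the fused-op name (add_div_exp_mul_neg_sum),
    # this kernel exponentiates the converged modified cost to form the
    # transport plan pi = exp(M(C, u, v)) (buf498) and contracts it with the
    # cost tensor to yield the per-batch Sinkhorn objective (buf499).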
del arg0_1
del arg2_1
del buf492
del buf495
del buf496
del buf497
return buf499, buf498
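# nn.Module wrapper emitted alongside the compiled graph: forward() simply
# routes its three inputs into call() above and unpacks the two results
# (the cost and, presumably, the transport plan).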
class SinkhornDistanceNew(torch.nn.Module):
"""
    Given two empirical measures, one with :math:`P_1` locations
    :math:`x\\in\\mathbb{R}^{D_1}` and one with :math:`P_2` locations
    :math:`y\\in\\mathbb{R}^{D_2}`, outputs an approximation of the
    regularized optimal-transport (OT) cost between the point clouds.
Args:
eps (float): regularization coefficient
max_iter (int): maximum number of Sinkhorn iterations
reduction (string, optional): Specifies the reduction to apply to the output:
'none' | 'mean' | 'sum'. 'none': no reduction will be applied,
'mean': the sum of the output will be divided by the number of
elements in the output, 'sum': the output will be summed. Default: 'none'
Shape:
- Input: :math:`(N, P_1, D_1)`, :math:`(N, P_2, D_2)`
- Output: :math:`(N)` or :math:`()`, depending on `reduction`
"""
def __init__(self, eps=0.001, max_iter=100, reduction='none'):
super(SinkhornDistanceNew, self).__init__()
self.eps = eps
self.max_iter = max_iter
self.reduction = reduction
def M(self, C, u, v):
"""
"Modified cost for logarithmic updates"
"$M_{ij} = (-c_{ij} + u_i + v_j) / epsilon$"
"""
return (-C + u.unsqueeze(-1) + v.unsqueeze(-2)) / self.eps
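    # For reference: in the standard log-domain Sinkhorn loop that `call`
    # above appears to unroll, M feeds dual-potential updates of the form
    # (a sketch of the assumed original loop, not part of the traced graph):
    #   u = self.eps * (torch.log(mu + 1e-8)
    #                   - torch.logsumexp(self.M(C, u, v), dim=-1)) + u
    #   v = self.eps * (torch.log(nu + 1e-8)
    #                   - torch.logsumexp(self.M(C, u, v).transpose(-2, -1),
    #                                     dim=-1)) + v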
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
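# Minimal usage sketch (hedged): the buffer shapes above suggest the graph was
# traced with three 4x4x4x4 float32 CUDA tensors (the two marginals and the
# cost tensor of the solver), so a smoke test would look roughly like:
#
#   m = SinkhornDistanceNew(eps=0.001, max_iter=100)
#   a, b, c = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
#   cost, plan = m(a, b, c)   # wraps call([a, b, c])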
| yjh0410/actionformer_release | SinkhornDistance | false | 16,855 | ["MIT"] | 61 | 7a97422111d3e29c8d2e14088c850c6975855ea7 | https://github.com/yjh0410/actionformer_release/tree/7a97422111d3e29c8d2e14088c850c6975855ea7 |
FCN8s | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/kn/cknyjwkwufnzzf4ya3scui55ownkmt5cdh3hggzwsfe3ch5fshzm.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
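# (Layout prologue: judging by its indexing, with ynumel = 12 (4 batches x 3
# channels) and xnumel = 4096 (64 x 64), this kernel repacks the NCHW input
# into channels-last NHWC so the fused convolutions can read it contiguously.)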
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5t/c5ta5b5nw4dp65565mg3k6wfbphtogtvx5v75up5yeibgiwkacek.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
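
# The kernels around here with empty "Unsorted Source Nodes" mappings appear
# to be compiler-inserted layout transforms rather than lowered ATen ops. This
# one repacks a (64, 3, 3, 3) OIHW conv weight so the input-channel axis lands
# innermost (O, kH, kW, I), matching the channels-last activations above;
# triton_poi_fused_2 through triton_poi_fused_9 repeat the same repack at the
# other layer sizes. A minimal eager analogue (the shapes read off
# ynumel = 64*3 and xnumel = 3*3 are assumptions):
def _reference_weight_to_channels_last(w: torch.Tensor) -> torch.Tensor:
    # Same values, input-channel dimension made innermost in memory.
    return w.contiguous(memory_format=torch.channels_last)
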
# kernel path: runs/run_shard_0/inductor_cache/xq/cxq75w43anllid5ys7ss3yyizuoeph3vvaqlvm5lo434hrywtyle.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4096
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/nw/cnwm6ljuusoqjcwr2jdx6p2ue7ldghxjdr3oe62stiuqhsboiczy.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/32/c32xiwptfqtyhbnde262mvq5tzywzo6zquurttkv7sztqnze6yni.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16384
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/jj/cjjz4tpbucpuc3faa2ky32crfwhb5fbnssd6o2yfkgdcjg2acfmo.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 32768
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tg/ctgdsxjd3rciejxtjvi3y2w5fmmggh5lm3mivuygvkdzeb3zulmc.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_6 = async_compile.triton('triton_poi_fused_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 65536
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
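    # (The z grid axis is folded into yoffset here: with ynumel = 65536 rows,
    # a plain 2D launch could exceed CUDA's 65535 grid-dimension limit for
    # small YBLOCK configs, which is presumably why this and the larger
    # repacks below switch to three-axis indexing.)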
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/e7/ce7jqsdrj5poslb2hpufqd2wdux5xiab5n2auqal3ztzvkzrmnzl.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_7 = async_compile.triton('triton_poi_fused_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 131072
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ks/ckso6iiq5yfqfxmx7ilr6ufrmz6mlkiy75pexzhyf3ierq4pu3zl.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_8 = async_compile.triton('triton_poi_fused_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 262144
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4i/c4islqctnux7quywor4ljttjc6krtgvecvzfsjd2pvp4i6z2bufb.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_9 = async_compile.triton('triton_poi_fused_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2097152
xnumel = 49
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (49*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (25088*y1)), tmp0, xmask)
''', device_str='cuda')
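
# (For this repack ynumel = 2097152 = 4096*512 and xnumel = 49 = 7*7, which
# matches an fc6-style layer realized as a 7x7 convolution over the
# 512-channel pool5 feature map; this is an inference from the sizes, not
# something stated in the source graph.)
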
# kernel path: runs/run_shard_0/inductor_cache/h5/ch5nk7tx3kr64bil7o47xjdzytlvf6h572v4eipz6bqw4jnikisw.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_10 = async_compile.triton('triton_poi_fused_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 9
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (48*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ox/coxotsmzjcqgicpgnqgkbwlirvmbjxnoyiwtpux6rrdtzx3qiv4m.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_11 = async_compile.triton('triton_poi_fused_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 256], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_11(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 9
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (256*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (768*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/cp/ccp4ojszpovg6la422usrylat5keq24pc6vzzggj6xjcgfrzhcxg.py
# Topologically Sorted Source Nodes: [conv2d, h], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# h => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [100, 100], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_12 = async_compile.triton('triton_poi_fused_convolution_relu_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[33554432],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 17572864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
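
# The convolution itself is dispatched to an extern kernel; this Triton kernel
# is only its epilogue, adding the per-channel bias (x0 = xindex % 64 selects
# the channel in the channels-last buffer) and applying ReLU in place. A
# hedged NCHW eager sketch of the fused pair, using the stride and the
# [100, 100] padding from the graph fragment above:
def _reference_conv_relu(x, weight, bias):
    import torch.nn.functional as F
    y = F.conv2d(x, weight, bias=None, stride=1, padding=100)
    # Adding the bias before the ReLU matches conv2d(..., bias=bias) + relu.
    return F.relu(y + bias.view(1, -1, 1, 1))
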
# kernel path: runs/run_shard_0/inductor_cache/xz/cxzlno4gfs3kc72eat6xpiu6xn5dr6yvc2opm5zd7zvxg7ggvnie.py
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# h_2 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_13 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4393216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64) % 131
x2 = (xindex // 8384)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (33536*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (33536*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (16768 + x0 + (128*x1) + (33536*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (16832 + x0 + (128*x1) + (33536*x2)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
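
# This pooling kernel reads one 2x2 window (the four loads above), stores the
# running max, and records which of the four window positions won as an int8
# code in {0, 1, 2, 3} rather than aten's flat return_indices encoding. A
# hedged eager reference that matches the pooled values but not the index
# format:
def _reference_maxpool2x2(x):
    import torch.nn.functional as F
    return F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)
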
# kernel path: runs/run_shard_0/inductor_cache/7f/c7f3npybiepto4pskonp4rrflyi2drrmu7gy7zqiboknduhfvsfv.py
# Topologically Sorted Source Nodes: [conv2d_2, h_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# h_3 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_14 = async_compile.triton('triton_poi_fused_convolution_relu_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16777216],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8786432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6v/c6vh6fohzdliwr7kxmm7qx6hmyvozyr5aky4loctm53mwljdb23x.py
# Topologically Sorted Source Nodes: [h_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# h_5 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_15 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 2230272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 8448) % 66
x1 = (xindex // 128) % 66
x0 = xindex % 128
x3 = (xindex // 557568)
x6 = xindex
tmp0 = 2*x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 131, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 2*x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (x0 + (256*x1) + (33536*x2) + (2196608*x3)), tmp10, other=float("-inf"))
tmp12 = 1 + (2*x1)
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (128 + x0 + (256*x1) + (33536*x2) + (2196608*x3)), tmp16, other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x2)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (16768 + x0 + (256*x1) + (33536*x2) + (2196608*x3)), tmp23, other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (16896 + x0 + (256*x1) + (33536*x2) + (2196608*x3)), tmp26, other=float("-inf"))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tl.store(out_ptr0 + (x6), tmp28, None)
tl.store(out_ptr1 + (x6), tmp38, None)
''', device_str='cuda')
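
# Unlike the kernel above, this one pools an odd 131x131 map down to 66x66,
# so the bound checks (tmp2 through tmp26) mask out-of-range taps and the
# masked loads fall back to other=float("-inf"). That is the low-memory
# lowering of a pooling window allowed to hang off the edge, i.e. ceil-mode
# behavior (66 = ceil(131/2) per side). A hedged eager analogue:
def _reference_maxpool2x2_ceil(x):
    import torch.nn.functional as F
    return F.max_pool2d(x, kernel_size=2, stride=2, ceil_mode=True,
                        return_indices=True)
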
# kernel path: runs/run_shard_0/inductor_cache/t7/ct7w7cw5mqsq4ea34flessxkmsh4ej3cv3n6smkzfa27walntfeh.py
# Topologically Sorted Source Nodes: [conv2d_4, h_6], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# h_6 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_16 = async_compile.triton('triton_poi_fused_convolution_relu_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4460544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/d2/cd2lbbktrlinq53ce3g2bvhqixfzcynec4yh52pbb5no5nvmictc.py
# Topologically Sorted Source Nodes: [h_9], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# h_9 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_17 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_17(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1115136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x1 = (xindex // 256) % 33
x2 = (xindex // 8448)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (33792*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (33792*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (16896 + x0 + (512*x1) + (33792*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (17152 + x0 + (512*x1) + (33792*x2)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ka/ckakcvtkfyvarj6bygevbxelipfntuuwg3fl65cadqwzpxizcl7j.py
# Topologically Sorted Source Nodes: [conv2d_7, h_10], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_7 => convolution_7
# h_10 => relu_7
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {})
triton_poi_fused_convolution_relu_18 = async_compile.triton('triton_poi_fused_convolution_relu_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2230272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7s/c7sk5lcaepj3olmug655q7hxtrgrqziu4e5f27yx7ka6vbwluk3s.py
# Topologically Sorted Source Nodes: [h_13], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# h_13 => getitem_6, getitem_7
# Graph fragment:
# %getitem_6 : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {})
# %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_19 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_19(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 591872
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 8704) % 17
x1 = (xindex // 512) % 17
x0 = xindex % 512
x3 = (xindex // 147968)
x6 = xindex
tmp0 = 2*x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 33, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 2*x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (x0 + (1024*x1) + (33792*x2) + (557568*x3)), tmp10, other=float("-inf"))
tmp12 = 1 + (2*x1)
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (512 + x0 + (1024*x1) + (33792*x2) + (557568*x3)), tmp16, other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x2)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (16896 + x0 + (1024*x1) + (33792*x2) + (557568*x3)), tmp23, other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (17408 + x0 + (1024*x1) + (33792*x2) + (557568*x3)), tmp26, other=float("-inf"))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tl.store(out_ptr0 + (x6), tmp28, None)
tl.store(out_ptr1 + (x6), tmp38, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hx/chxwibttvyopiavs7og2i665mglcnendceewlhrfkqxdqprsj4r4.py
# Topologically Sorted Source Nodes: [conv2d_10, h_14], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_10 => convolution_10
# h_14 => relu_10
# Graph fragment:
# %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_10 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {})
triton_poi_fused_convolution_relu_20 = async_compile.triton('triton_poi_fused_convolution_relu_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_20', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 591872
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7u/c7usmy4jmwhqvoypqaj7minodxefdgyuup7zbx3o5sdav22vppzn.py
# Topologically Sorted Source Nodes: [h_17], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# h_17 => getitem_8, getitem_9
# Graph fragment:
# %getitem_8 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_4, 0), kwargs = {})
# %getitem_9 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_4, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_21 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_21(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 165888
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 4608) % 9
x1 = (xindex // 512) % 9
x0 = xindex % 512
x3 = (xindex // 41472)
x6 = xindex
tmp0 = 2*x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 17, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 2*x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (x0 + (1024*x1) + (17408*x2) + (147968*x3)), tmp10, other=float("-inf"))
tmp12 = 1 + (2*x1)
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (512 + x0 + (1024*x1) + (17408*x2) + (147968*x3)), tmp16, other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x2)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (8704 + x0 + (1024*x1) + (17408*x2) + (147968*x3)), tmp23, other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (9216 + x0 + (1024*x1) + (17408*x2) + (147968*x3)), tmp26, other=float("-inf"))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tl.store(out_ptr0 + (x6), tmp28, None)
tl.store(out_ptr1 + (x6), tmp38, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vf/cvf6cbxucccgtdj2kshu7xqtxofuulnihdlcxxkpyduukdvzdwro.py
# Topologically Sorted Source Nodes: [conv2d_13, h_18], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_13 => convolution_13
# h_18 => relu_13
# Graph fragment:
# %convolution_13 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_8, %primals_28, %primals_29, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_13 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_13,), kwargs = {})
triton_poi_fused_convolution_relu_22 = async_compile.triton('triton_poi_fused_convolution_relu_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_22', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_22(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 147456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
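# The fused kernel above is inductor's in-place epilogue pattern: in_out_ptr0
# arrives holding the raw convolution output and is overwritten with
# relu(conv + bias). The eager-mode equivalent is simply
#   h = torch.relu(conv_out + bias.view(1, -1, 1, 1))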
# kernel path: runs/run_shard_0/inductor_cache/yh/cyhkbjvetymby6mpct7rlpndqqutx3zjf4sp5ispsk4z5zau4q3c.py
# Topologically Sorted Source Nodes: [h_22], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# h_22 => convolution_15
# Graph fragment:
# %convolution_15 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_14, %primals_32, %primals_33, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_23 = async_compile.triton('triton_poi_fused_convolution_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_23', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_23(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 108
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 3
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7n/c7na7waruh5rpupfepq2cof4fo26n5qyixxqcvjtajoo7dsre5a3.py
# Topologically Sorted Source Nodes: [h_26], Original ATen: [aten.add]
# Source node to ATen node mapping:
# h_26 => add
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_16, %slice_4), kwargs = {})
triton_poi_fused_add_24 = async_compile.triton('triton_poi_fused_add_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_24', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_24(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x2 = (xindex // 24) % 8
x3 = (xindex // 192)
x5 = xindex % 24
x0 = xindex % 3
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (270 + x5 + (51*x2) + (867*x3)), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/af/caf46sg5csubt24ujylbqfbaid2x74jglivbmclbuzoelyup5w2t.py
# Topologically Sorted Source Nodes: [h_30], Original ATen: [aten.add]
# Source node to ATen node mapping:
# h_30 => add_1
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_18, %slice_8), kwargs = {})
triton_poi_fused_add_25 = async_compile.triton('triton_poi_fused_add_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_25', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_25(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 3888
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x2 = (xindex // 54) % 18
x3 = (xindex // 972)
x5 = xindex % 54
x0 = xindex % 3
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (918 + x5 + (99*x2) + (3267*x3)), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6w/c6wjjphuxndubhydm6hfil2d6af5jh3h6ixwyrxs2slyqlprlxgs.py
# Topologically Sorted Source Nodes: [h_32], Original ATen: [aten.slice]
# Source node to ATen node mapping:
# h_32 => slice_12
# Graph fragment:
# %slice_12 : [num_users=1] = call_function[target=torch.ops.aten.slice.Tensor](args = (%slice_11, 3, 31, 95), kwargs = {})
triton_poi_fused_slice_26 = async_compile.triton('triton_poi_fused_slice_26', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_slice_26', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_slice_26(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex % 64
x3 = (xindex // 64)
y0 = yindex % 3
y1 = (yindex // 3)
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (14229 + y0 + (3*x2) + (456*x3) + (69312*y1)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x5 + (4096*y4)), tmp0, ymask)
''', device_str='cuda')
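# The constant 14229 in the kernel above is the crop origin, not magic: with
# channels-last strides (69312, 1, 456, 3) it equals 31 * 456 + 31 * 3, i.e.
# element (row=31, col=31), so the kernel materializes h[:, :, 31:95, 31:95].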
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256, ), (1, ))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256, ), (1, ))
assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (512, ), (1, ))
assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_19, (512, ), (1, ))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512, ), (1, ))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512, ), (1, ))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512, ), (1, ))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512, ), (1, ))
assert_size_stride(primals_28, (4096, 512, 7, 7), (25088, 49, 7, 1))
assert_size_stride(primals_29, (4096, ), (1, ))
assert_size_stride(primals_30, (4096, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_31, (4096, ), (1, ))
assert_size_stride(primals_32, (3, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_33, (3, ), (1, ))
assert_size_stride(primals_34, (3, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_35, (3, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_36, (3, ), (1, ))
assert_size_stride(primals_37, (3, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_38, (3, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_39, (3, ), (1, ))
assert_size_stride(primals_40, (3, 3, 16, 16), (768, 256, 16, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_2, buf1, 192, 9, grid=grid(192, 9), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 4096, 9, grid=grid(4096, 9), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_6, buf3, 8192, 9, grid=grid(8192, 9), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_8, buf4, 16384, 9, grid=grid(16384, 9), stream=stream0)
del primals_8
buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_10, buf5, 32768, 9, grid=grid(32768, 9), stream=stream0)
del primals_10
buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(primals_12, buf6, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_12
buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(primals_14, buf7, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_14
buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_7.run(primals_16, buf8, 131072, 9, grid=grid(131072, 9), stream=stream0)
del primals_16
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_18, buf9, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_18
buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_20, buf10, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_20
buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_22, buf11, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_22
buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_24, buf12, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_24
buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_26, buf13, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_26
buf14 = empty_strided_cuda((4096, 512, 7, 7), (25088, 1, 3584, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_9.run(primals_28, buf14, 2097152, 49, grid=grid(2097152, 49), stream=stream0)
del primals_28
buf15 = empty_strided_cuda((3, 3, 4, 4), (48, 1, 12, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_10.run(primals_34, buf15, 9, 16, grid=grid(9, 16), stream=stream0)
del primals_34
buf16 = empty_strided_cuda((3, 3, 4, 4), (48, 1, 12, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_10.run(primals_37, buf16, 9, 16, grid=grid(9, 16), stream=stream0)
del primals_37
buf17 = empty_strided_cuda((3, 3, 16, 16), (768, 1, 48, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_11.run(primals_40, buf17, 9, 256, grid=grid(9, 256), stream=stream0)
del primals_40
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(100, 100), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 64, 262, 262), (4393216, 1, 16768, 64))
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [conv2d, h], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_12.run(buf19, primals_3, 17572864, grid=grid(17572864), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf19, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 64, 262, 262), (4393216, 1, 16768, 64))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, h_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_12.run(buf21, primals_5, 17572864, grid=grid(17572864), stream=stream0)
del primals_5
buf22 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64), torch.float32)
buf23 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64), torch.int8)
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_13.run(buf21, buf22, buf23, 4393216, grid=grid(4393216), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf22, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 128, 131, 131), (2196608, 1, 16768, 128))
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, h_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_14.run(buf25, primals_7, 8786432, grid=grid(8786432), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf26 = extern_kernels.convolution(buf25, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 128, 131, 131), (2196608, 1, 16768, 128))
buf27 = buf26; del buf26 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, h_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_14.run(buf27, primals_9, 8786432, grid=grid(8786432), stream=stream0)
del primals_9
buf28 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128), torch.float32)
buf29 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128), torch.int8)
# Topologically Sorted Source Nodes: [h_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_15.run(buf27, buf28, buf29, 2230272, grid=grid(2230272), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf30 = extern_kernels.convolution(buf28, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 256, 66, 66), (1115136, 1, 16896, 256))
buf31 = buf30; del buf30 # reuse
# Topologically Sorted Source Nodes: [conv2d_4, h_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_16.run(buf31, primals_11, 4460544, grid=grid(4460544), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf32 = extern_kernels.convolution(buf31, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 256, 66, 66), (1115136, 1, 16896, 256))
buf33 = buf32; del buf32 # reuse
# Topologically Sorted Source Nodes: [conv2d_5, h_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_16.run(buf33, primals_13, 4460544, grid=grid(4460544), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf34 = extern_kernels.convolution(buf33, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 256, 66, 66), (1115136, 1, 16896, 256))
buf35 = buf34; del buf34 # reuse
# Topologically Sorted Source Nodes: [conv2d_6, h_8], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_16.run(buf35, primals_15, 4460544, grid=grid(4460544), stream=stream0)
del primals_15
buf36 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256), torch.float32)
buf37 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256), torch.int8)
# Topologically Sorted Source Nodes: [h_9], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_17.run(buf35, buf36, buf37, 1115136, grid=grid(1115136), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf36, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 33, 33), (557568, 1, 16896, 512))
buf39 = buf38; del buf38 # reuse
# Topologically Sorted Source Nodes: [conv2d_7, h_10], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_18.run(buf39, primals_17, 2230272, grid=grid(2230272), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
buf40 = extern_kernels.convolution(buf39, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 512, 33, 33), (557568, 1, 16896, 512))
buf41 = buf40; del buf40 # reuse
# Topologically Sorted Source Nodes: [conv2d_8, h_11], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_18.run(buf41, primals_19, 2230272, grid=grid(2230272), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
buf42 = extern_kernels.convolution(buf41, buf10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 512, 33, 33), (557568, 1, 16896, 512))
buf43 = buf42; del buf42 # reuse
# Topologically Sorted Source Nodes: [conv2d_9, h_12], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_18.run(buf43, primals_21, 2230272, grid=grid(2230272), stream=stream0)
del primals_21
buf44 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512), torch.float32)
buf45 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512), torch.int8)
# Topologically Sorted Source Nodes: [h_13], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_19.run(buf43, buf44, buf45, 591872, grid=grid(591872), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
buf46 = extern_kernels.convolution(buf44, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 512, 17, 17), (147968, 1, 8704, 512))
buf47 = buf46; del buf46 # reuse
# Topologically Sorted Source Nodes: [conv2d_10, h_14], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_20.run(buf47, primals_23, 591872, grid=grid(591872), stream=stream0)
del primals_23
# Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution]
buf48 = extern_kernels.convolution(buf47, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 512, 17, 17), (147968, 1, 8704, 512))
buf49 = buf48; del buf48 # reuse
# Topologically Sorted Source Nodes: [conv2d_11, h_15], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_20.run(buf49, primals_25, 591872, grid=grid(591872), stream=stream0)
del primals_25
# Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
buf50 = extern_kernels.convolution(buf49, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf50, (4, 512, 17, 17), (147968, 1, 8704, 512))
buf51 = buf50; del buf50 # reuse
# Topologically Sorted Source Nodes: [conv2d_12, h_16], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_20.run(buf51, primals_27, 591872, grid=grid(591872), stream=stream0)
del primals_27
buf52 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512), torch.float32)
buf53 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512), torch.int8)
# Topologically Sorted Source Nodes: [h_17], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_21.run(buf51, buf52, buf53, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution]
buf54 = extern_kernels.convolution(buf52, buf14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf54, (4, 4096, 3, 3), (36864, 1, 12288, 4096))
buf55 = buf54; del buf54 # reuse
# Topologically Sorted Source Nodes: [conv2d_13, h_18], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_22.run(buf55, primals_29, 147456, grid=grid(147456), stream=stream0)
del primals_29
# Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution]
buf56 = extern_kernels.convolution(buf55, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 4096, 3, 3), (36864, 1, 12288, 4096))
buf57 = buf56; del buf56 # reuse
# Topologically Sorted Source Nodes: [conv2d_14, h_20], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_22.run(buf57, primals_31, 147456, grid=grid(147456), stream=stream0)
del primals_31
# Topologically Sorted Source Nodes: [h_22], Original ATen: [aten.convolution]
buf58 = extern_kernels.convolution(buf57, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf58, (4, 3, 3, 3), (27, 1, 9, 3))
buf59 = buf58; del buf58 # reuse
# Topologically Sorted Source Nodes: [h_22], Original ATen: [aten.convolution]
triton_poi_fused_convolution_23.run(buf59, primals_33, 108, grid=grid(108), stream=stream0)
del primals_33
# Topologically Sorted Source Nodes: [h_23], Original ATen: [aten.convolution]
buf60 = extern_kernels.convolution(buf59, buf15, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf60, (4, 3, 8, 8), (192, 1, 24, 3))
# Topologically Sorted Source Nodes: [h_24], Original ATen: [aten.convolution]
buf61 = extern_kernels.convolution(buf44, primals_35, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf61, (4, 3, 17, 17), (867, 1, 51, 3))
buf62 = buf60; del buf60 # reuse
# Topologically Sorted Source Nodes: [h_26], Original ATen: [aten.add]
triton_poi_fused_add_24.run(buf62, buf61, primals_36, 768, grid=grid(768), stream=stream0)
del buf61
del primals_36
# Topologically Sorted Source Nodes: [h_27], Original ATen: [aten.convolution]
buf63 = extern_kernels.convolution(buf62, buf16, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf63, (4, 3, 18, 18), (972, 1, 54, 3))
# Topologically Sorted Source Nodes: [h_28], Original ATen: [aten.convolution]
buf64 = extern_kernels.convolution(buf36, primals_38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 3, 33, 33), (3267, 1, 99, 3))
buf65 = buf63; del buf63 # reuse
# Topologically Sorted Source Nodes: [h_30], Original ATen: [aten.add]
triton_poi_fused_add_25.run(buf65, buf64, primals_39, 3888, grid=grid(3888), stream=stream0)
del buf64
del primals_39
# Topologically Sorted Source Nodes: [h_31], Original ATen: [aten.convolution]
buf66 = extern_kernels.convolution(buf65, buf17, stride=(8, 8), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 3, 152, 152), (69312, 1, 456, 3))
buf67 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_32], Original ATen: [aten.slice]
triton_poi_fused_slice_26.run(buf66, buf67, 12, 4096, grid=grid(12, 4096), stream=stream0)
del buf66
return (buf67, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, buf14, primals_30, primals_32, buf15, primals_35, buf16, primals_38, buf17, buf19, buf21, buf22, buf23, buf25, buf27, buf28, buf29, buf31, buf33, buf35, buf36, buf37, buf39, buf41, buf43, buf44, buf45, buf47, buf49, buf51, buf52, buf53, buf55, buf57, buf59, buf62, buf65, )
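# Beyond buf67 (the final score map), the tuple above returns the repacked
# weights and saved intermediate activations that the backward graph consumes.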
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((4096, 512, 7, 7), (25088, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((4096, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((3, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((3, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((3, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((3, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((3, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((3, 3, 16, 16), (768, 256, 16, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
class FCN8s(nn.Module):
def __init__(self, n_class=3):
super(FCN8s, self).__init__()
self.conv1_1 = nn.Conv2d(3, 64, 3, padding=100)
self.relu1_1 = nn.ReLU(inplace=True)
self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
self.relu1_2 = nn.ReLU(inplace=True)
self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
self.relu2_1 = nn.ReLU(inplace=True)
self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
self.relu2_2 = nn.ReLU(inplace=True)
self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
self.relu3_1 = nn.ReLU(inplace=True)
self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
self.relu3_2 = nn.ReLU(inplace=True)
self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
self.relu3_3 = nn.ReLU(inplace=True)
self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
self.relu4_1 = nn.ReLU(inplace=True)
self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_2 = nn.ReLU(inplace=True)
self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_3 = nn.ReLU(inplace=True)
self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_1 = nn.ReLU(inplace=True)
self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_2 = nn.ReLU(inplace=True)
self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_3 = nn.ReLU(inplace=True)
self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.fc6 = nn.Conv2d(512, 4096, 7)
self.relu6 = nn.ReLU(inplace=True)
self.drop6 = nn.Dropout2d()
self.fc7 = nn.Conv2d(4096, 4096, 1)
self.relu7 = nn.ReLU(inplace=True)
self.drop7 = nn.Dropout2d()
self.score_fr = nn.Conv2d(4096, n_class, 1)
self.score_pool3 = nn.Conv2d(256, n_class, 1)
self.score_pool4 = nn.Conv2d(512, n_class, 1)
self.upscore2 = nn.ConvTranspose2d(n_class, n_class, 4, stride=2,
bias=False)
self.upscore8 = nn.ConvTranspose2d(n_class, n_class, 16, stride=8,
bias=False)
        self.upscore_pool4 = nn.ConvTranspose2d(n_class, n_class, 4,
            stride=2, bias=False)
self._initialize_weights()
    def _initialize_weights(self):
        # zero-init every conv; transposed convs get fixed bilinear kernels
for mod in self.modules():
if isinstance(mod, nn.Conv2d):
mod.weight.data.zero_()
if mod.bias is not None:
mod.bias.data.zero_()
if isinstance(mod, nn.ConvTranspose2d):
m, k, h, w = mod.weight.data.shape
if m != k and k != 1:
raise RuntimeError(
'input + output channels need to be the same or |output| == 1'
)
if h != w:
raise RuntimeError('filters need to be square')
filt = torch.from_numpy(self.upsample_filt(h)).float()
mod.weight.data[range(m), range(k), :, :] = filt
    def upsample_filt(self, size):
        # build a (size, size) bilinear interpolation kernel
        factor = (size + 1) // 2
        if size % 2 == 1:
            center = factor - 1
        else:
            center = factor - 0.5
        og = np.ogrid[:size, :size]
        return (1 - abs(og[0] - center) / factor) * (
            1 - abs(og[1] - center) / factor)
def forward(self, x):
h = x
h = self.relu1_1(self.conv1_1(h))
h = self.relu1_2(self.conv1_2(h))
h = self.pool1(h)
h = self.relu2_1(self.conv2_1(h))
h = self.relu2_2(self.conv2_2(h))
h = self.pool2(h)
h = self.relu3_1(self.conv3_1(h))
h = self.relu3_2(self.conv3_2(h))
h = self.relu3_3(self.conv3_3(h))
h = self.pool3(h)
pool3 = h
h = self.relu4_1(self.conv4_1(h))
h = self.relu4_2(self.conv4_2(h))
h = self.relu4_3(self.conv4_3(h))
h = self.pool4(h)
pool4 = h
h = self.relu5_1(self.conv5_1(h))
h = self.relu5_2(self.conv5_2(h))
h = self.relu5_3(self.conv5_3(h))
h = self.pool5(h)
h = self.relu6(self.fc6(h))
h = self.drop6(h)
h = self.relu7(self.fc7(h))
h = self.drop7(h)
h = self.score_fr(h)
h = self.upscore2(h)
upscore2 = h
        h = self.score_pool4(pool4)
        # crop pool4 scores to upscore2's extent; the offset 5 compensates
        # for the extra border introduced by conv1_1's padding=100
        h = h[:, :, 5:5 + upscore2.size()[2], 5:5 + upscore2.size()[3]]
score_pool4c = h
h = upscore2 + score_pool4c
h = self.upscore_pool4(h)
upscore_pool4 = h
        h = self.score_pool3(pool3)
        # likewise crop pool3 scores to upscore_pool4's extent (offset 9)
        h = h[:, :, 9:9 + upscore_pool4.size()[2],
            9:9 + upscore_pool4.size()[3]]
score_pool3c = h
h = upscore_pool4 + score_pool3c
h = self.upscore8(h)
        # final crop: offset 31 recovers the input's spatial extent
        h = h[:, :, 31:31 + x.size()[2], 31:31 + x.size()[3]]
return h
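# A quick sanity check of the bilinear initialization (a sketch: for size=4,
# upsample_filt returns the outer product of [0.25, 0.75, 0.75, 0.25] with
# itself, the standard 4x4 bilinear kernel):
def _check_bilinear_filter():
    filt = FCN8s().upsample_filt(4)
    row = np.array([0.25, 0.75, 0.75, 0.25])
    assert np.allclose(filt, np.outer(row, row))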
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
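# Hypothetical smoke test: because the forward pass crops its output back to
# the input's spatial size, a (4, 3, 64, 64) input yields a (4, 3, 64, 64)
# score map despite the padding=100 in conv1_1.
def _smoke_test():
    model = FCN8s()
    out = model(*get_inputs())
    assert out.shape == (4, 3, 64, 64)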
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 49
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 25088 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 9
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 48 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_11(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 9
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 256 * y3), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 768 * y1), tmp0, xmask & ymask)
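# Note: triton_poi_fused_0 through triton_poi_fused_11 above perform no
# arithmetic; they only repack the input image and the convolution weights
# into channels-last strides (channels innermost) so that every convolution
# below reads contiguous channel vectors.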
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 17572864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4393216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64 % 131
x2 = xindex // 8384
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 33536 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 33536 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (16768 + x0 + 128 * x1 + 33536 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (16832 + x0 + 128 * x1 + 33536 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 8786432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 8448 % 66
x1 = xindex // 128 % 66
x0 = xindex % 128
x3 = xindex // 557568
x6 = xindex
tmp0 = 2 * x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 131, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 2 * x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (x0 + 256 * x1 + 33536 * x2 + 2196608 * x3),
tmp10, other=float('-inf'))
tmp12 = 1 + 2 * x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 33536 * x2 + 2196608 *
x3), tmp16, other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x2
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (16768 + x0 + 256 * x1 + 33536 * x2 + 2196608 *
x3), tmp23, other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (16896 + x0 + 256 * x1 + 33536 * x2 + 2196608 *
x3), tmp26, other=float('-inf'))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tl.store(out_ptr0 + x6, tmp28, None)
tl.store(out_ptr1 + x6, tmp38, None)
@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_17(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1115136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x1 = xindex // 256 % 33
x2 = xindex // 8448
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 33792 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 33792 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (16896 + x0 + 512 * x1 + 33792 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (17152 + x0 + 512 * x1 + 33792 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_19(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 8704 % 17
x1 = xindex // 512 % 17
x0 = xindex % 512
x3 = xindex // 147968
x6 = xindex
tmp0 = 2 * x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 33, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 2 * x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 33792 * x2 + 557568 * x3),
tmp10, other=float('-inf'))
tmp12 = 1 + 2 * x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 33792 * x2 + 557568 *
x3), tmp16, other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x2
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (16896 + x0 + 1024 * x1 + 33792 * x2 + 557568 *
x3), tmp23, other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (17408 + x0 + 1024 * x1 + 33792 * x2 + 557568 *
x3), tmp26, other=float('-inf'))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tl.store(out_ptr0 + x6, tmp28, None)
tl.store(out_ptr1 + x6, tmp38, None)
@triton.jit
def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_21(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 4608 % 9
x1 = xindex // 512 % 9
x0 = xindex % 512
x3 = xindex // 41472
x6 = xindex
tmp0 = 2 * x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 17, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 2 * x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 17408 * x2 + 147968 * x3),
tmp10, other=float('-inf'))
tmp12 = 1 + 2 * x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 17408 * x2 + 147968 *
x3), tmp16, other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x2
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (8704 + x0 + 1024 * x1 + 17408 * x2 + 147968 *
x3), tmp23, other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (9216 + x0 + 1024 * x1 + 17408 * x2 + 147968 *
x3), tmp26, other=float('-inf'))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tl.store(out_ptr0 + x6, tmp28, None)
tl.store(out_ptr1 + x6, tmp38, None)
@triton.jit
def triton_poi_fused_convolution_relu_22(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
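# A minimal standalone launch of the fused bias+ReLU kernel above (a sketch,
# assuming a CUDA device; the kernel's `xindex % 4096` channel indexing
# presumes channels-last strides, here (36864, 1, 12288, 4096)):
def _demo_fused_relu_22():
    buf = torch.randn(4, 4096, 3, 3, device='cuda')
    buf = buf.to(memory_format=torch.channels_last)
    bias = torch.randn(4096, device='cuda')
    ref = torch.relu(buf + bias.view(1, -1, 1, 1))
    # xnumel = 4 * 4096 * 3 * 3 = 147456; the kernel is unmasked, so XBLOCK
    # must divide it evenly (147456 / 256 = 576 programs).
    triton_poi_fused_convolution_relu_22[grid(147456)](buf, bias, 147456,
        XBLOCK=256)
    assert torch.allclose(buf, ref)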
@triton.jit
def triton_poi_fused_convolution_23(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 108
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 3
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
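# Unlike the kernels above, triton_poi_fused_convolution_23 adds the score_fr
# bias without a ReLU: the final 1x1 scoring convolution emits raw class logits.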
@triton.jit
def triton_poi_fused_add_24(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x2 = xindex // 24 % 8
x3 = xindex // 192
x5 = xindex % 24
x0 = xindex % 3
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + (270 + x5 + 51 * x2 + 867 * x3), xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x4, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_25(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 3888
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x2 = xindex // 54 % 18
x3 = xindex // 972
x5 = xindex % 54
x0 = xindex % 3
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + (918 + x5 + 99 * x2 + 3267 * x3), xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x4, tmp4, xmask)
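# The two add kernels above implement FCN's skip connections: each crops the
# higher-resolution score map to the shape of the upsampled coarse map and
# sums them in place. The hard-coded load offsets encode the crop origins --
# 270 = 5 * 51 + 5 * 3, i.e. a (5, 5) crop into the 17x17 score_pool4 map in
# triton_poi_fused_add_24, and 918 = 9 * 99 + 9 * 3, i.e. a (9, 9) crop into
# the 33x33 score_pool3 map in triton_poi_fused_add_25 (offsets inferred from
# the strides; channels-last layout with 3 channels).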
@triton.jit
def triton_poi_fused_slice_26(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl
.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex % 64
x3 = xindex // 64
y0 = yindex % 3
y1 = yindex // 3
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (14229 + y0 + 3 * x2 + 456 * x3 + 69312 * y1),
ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x5 + 4096 * y4), tmp0, ymask)
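# triton_poi_fused_slice_26 performs the final crop of the 152x152 upscore8
# output back to the 64x64 input resolution while converting from
# channels-last back to contiguous NCHW; the base offset 14229 = 31 * 456 +
# 31 * 3 corresponds to the (31, 31) crop origin used by FCN8s (inferred from
# the strides).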
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40) = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (512,), (1,))
assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_19, (512,), (1,))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512,), (1,))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512,), (1,))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512,), (1,))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512,), (1,))
assert_size_stride(primals_28, (4096, 512, 7, 7), (25088, 49, 7, 1))
assert_size_stride(primals_29, (4096,), (1,))
assert_size_stride(primals_30, (4096, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_31, (4096,), (1,))
assert_size_stride(primals_32, (3, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_33, (3,), (1,))
assert_size_stride(primals_34, (3, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_35, (3, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_36, (3,), (1,))
assert_size_stride(primals_37, (3, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_38, (3, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_39, (3,), (1,))
assert_size_stride(primals_40, (3, 3, 16, 16), (768, 256, 16, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(12, 4096)](primals_1, buf0, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
triton_poi_fused_1[grid(192, 9)](primals_2, buf1, 192, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_2[grid(4096, 9)](primals_4, buf2, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_4[grid(16384, 9)](primals_8, buf4, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_5[grid(32768, 9)](primals_10, buf5, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_6[grid(65536, 9)](primals_12, buf6, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_6[grid(65536, 9)](primals_14, buf7, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_14
buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_7[grid(131072, 9)](primals_16, buf8, 131072, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_16
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_18, buf9, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_18
buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_20, buf10, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_20
buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_22, buf11, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_22
buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_24, buf12, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_24
buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_26, buf13, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_26
buf14 = empty_strided_cuda((4096, 512, 7, 7), (25088, 1, 3584, 512),
torch.float32)
triton_poi_fused_9[grid(2097152, 49)](primals_28, buf14, 2097152,
49, XBLOCK=32, YBLOCK=64, num_warps=8, num_stages=1)
del primals_28
buf15 = empty_strided_cuda((3, 3, 4, 4), (48, 1, 12, 3), torch.float32)
triton_poi_fused_10[grid(9, 16)](primals_34, buf15, 9, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_34
buf16 = empty_strided_cuda((3, 3, 4, 4), (48, 1, 12, 3), torch.float32)
triton_poi_fused_10[grid(9, 16)](primals_37, buf16, 9, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_37
buf17 = empty_strided_cuda((3, 3, 16, 16), (768, 1, 48, 3), torch.
float32)
triton_poi_fused_11[grid(9, 256)](primals_40, buf17, 9, 256, XBLOCK
=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_40
buf18 = extern_kernels.convolution(buf0, buf1, stride=(1, 1),
padding=(100, 100), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 64, 262, 262), (4393216, 1, 16768, 64))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_12[grid(17572864)](buf19,
primals_3, 17572864, XBLOCK=512, num_warps=8, num_stages=1)
del primals_3
buf20 = extern_kernels.convolution(buf19, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 64, 262, 262), (4393216, 1, 16768, 64))
buf21 = buf20
del buf20
triton_poi_fused_convolution_relu_12[grid(17572864)](buf21,
primals_5, 17572864, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf22 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64
), torch.float32)
buf23 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64
), torch.int8)
triton_poi_fused_max_pool2d_with_indices_13[grid(4393216)](buf21,
buf22, buf23, 4393216, XBLOCK=512, num_warps=8, num_stages=1)
buf24 = extern_kernels.convolution(buf22, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 128, 131, 131), (2196608, 1, 16768, 128))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_14[grid(8786432)](buf25,
primals_7, 8786432, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf26 = extern_kernels.convolution(buf25, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 128, 131, 131), (2196608, 1, 16768, 128))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_14[grid(8786432)](buf27,
primals_9, 8786432, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf28 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128),
torch.float32)
buf29 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_15[grid(2230272)](buf27,
buf28, buf29, 2230272, XBLOCK=512, num_warps=8, num_stages=1)
buf30 = extern_kernels.convolution(buf28, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 256, 66, 66), (1115136, 1, 16896, 256))
buf31 = buf30
del buf30
triton_poi_fused_convolution_relu_16[grid(4460544)](buf31,
primals_11, 4460544, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf32 = extern_kernels.convolution(buf31, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 256, 66, 66), (1115136, 1, 16896, 256))
buf33 = buf32
del buf32
triton_poi_fused_convolution_relu_16[grid(4460544)](buf33,
primals_13, 4460544, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf34 = extern_kernels.convolution(buf33, buf7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 256, 66, 66), (1115136, 1, 16896, 256))
buf35 = buf34
del buf34
triton_poi_fused_convolution_relu_16[grid(4460544)](buf35,
primals_15, 4460544, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_15
buf36 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256),
torch.float32)
buf37 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_17[grid(1115136)](buf35,
buf36, buf37, 1115136, XBLOCK=512, num_warps=8, num_stages=1)
buf38 = extern_kernels.convolution(buf36, buf8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 33, 33), (557568, 1, 16896, 512))
buf39 = buf38
del buf38
triton_poi_fused_convolution_relu_18[grid(2230272)](buf39,
primals_17, 2230272, XBLOCK=512, num_warps=8, num_stages=1)
del primals_17
buf40 = extern_kernels.convolution(buf39, buf9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 512, 33, 33), (557568, 1, 16896, 512))
buf41 = buf40
del buf40
triton_poi_fused_convolution_relu_18[grid(2230272)](buf41,
primals_19, 2230272, XBLOCK=512, num_warps=8, num_stages=1)
del primals_19
buf42 = extern_kernels.convolution(buf41, buf10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 512, 33, 33), (557568, 1, 16896, 512))
buf43 = buf42
del buf42
triton_poi_fused_convolution_relu_18[grid(2230272)](buf43,
primals_21, 2230272, XBLOCK=512, num_warps=8, num_stages=1)
del primals_21
buf44 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512),
torch.float32)
buf45 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_19[grid(591872)](buf43,
buf44, buf45, 591872, XBLOCK=1024, num_warps=4, num_stages=1)
buf46 = extern_kernels.convolution(buf44, buf11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 512, 17, 17), (147968, 1, 8704, 512))
buf47 = buf46
del buf46
triton_poi_fused_convolution_relu_20[grid(591872)](buf47,
primals_23, 591872, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_23
buf48 = extern_kernels.convolution(buf47, buf12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 512, 17, 17), (147968, 1, 8704, 512))
buf49 = buf48
del buf48
triton_poi_fused_convolution_relu_20[grid(591872)](buf49,
primals_25, 591872, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_25
buf50 = extern_kernels.convolution(buf49, buf13, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf50, (4, 512, 17, 17), (147968, 1, 8704, 512))
buf51 = buf50
del buf50
triton_poi_fused_convolution_relu_20[grid(591872)](buf51,
primals_27, 591872, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_27
buf52 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512),
torch.float32)
buf53 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_21[grid(165888)](buf51,
buf52, buf53, 165888, XBLOCK=512, num_warps=8, num_stages=1)
buf54 = extern_kernels.convolution(buf52, buf14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf54, (4, 4096, 3, 3), (36864, 1, 12288, 4096))
buf55 = buf54
del buf54
triton_poi_fused_convolution_relu_22[grid(147456)](buf55,
primals_29, 147456, XBLOCK=512, num_warps=8, num_stages=1)
del primals_29
buf56 = extern_kernels.convolution(buf55, primals_30, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 4096, 3, 3), (36864, 1, 12288, 4096))
buf57 = buf56
del buf56
triton_poi_fused_convolution_relu_22[grid(147456)](buf57,
primals_31, 147456, XBLOCK=512, num_warps=8, num_stages=1)
del primals_31
buf58 = extern_kernels.convolution(buf57, primals_32, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf58, (4, 3, 3, 3), (27, 1, 9, 3))
buf59 = buf58
del buf58
triton_poi_fused_convolution_23[grid(108)](buf59, primals_33, 108,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_33
buf60 = extern_kernels.convolution(buf59, buf15, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf60, (4, 3, 8, 8), (192, 1, 24, 3))
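# upscore2 maps the 3x3 score map to 8x8: a stride-2 transposed conv with a
# 4x4 kernel gives H_out = (H_in - 1) * 2 + 4.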
buf61 = extern_kernels.convolution(buf44, primals_35, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf61, (4, 3, 17, 17), (867, 1, 51, 3))
buf62 = buf60
del buf60
triton_poi_fused_add_24[grid(768)](buf62, buf61, primals_36, 768,
XBLOCK=128, num_warps=4, num_stages=1)
del buf61
del primals_36
buf63 = extern_kernels.convolution(buf62, buf16, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf63, (4, 3, 18, 18), (972, 1, 54, 3))
buf64 = extern_kernels.convolution(buf36, primals_38, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 3, 33, 33), (3267, 1, 99, 3))
buf65 = buf63
del buf63
triton_poi_fused_add_25[grid(3888)](buf65, buf64, primals_39, 3888,
XBLOCK=256, num_warps=4, num_stages=1)
del buf64
del primals_39
buf66 = extern_kernels.convolution(buf65, buf17, stride=(8, 8),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 3, 152, 152), (69312, 1, 456, 3))
buf67 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.float32)
triton_poi_fused_slice_26[grid(12, 4096)](buf66, buf67, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del buf66
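# call() returns the cropped segmentation output (buf67) first; the remaining
# tensors are the transformed weights and saved activations that autograd
# needs for the backward pass.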
return (buf67, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8,
buf9, buf10, buf11, buf12, buf13, buf14, primals_30, primals_32,
buf15, primals_35, buf16, primals_38, buf17, buf19, buf21, buf22,
buf23, buf25, buf27, buf28, buf29, buf31, buf33, buf35, buf36,
buf37, buf39, buf41, buf43, buf44, buf45, buf47, buf49, buf51,
buf52, buf53, buf55, buf57, buf59, buf62, buf65)
import numpy as np
import torch.nn as nn
class FCN8sNew(nn.Module):
def __init__(self, n_class=3):
super(FCN8sNew, self).__init__()
self.conv1_1 = nn.Conv2d(3, 64, 3, padding=100)
self.relu1_1 = nn.ReLU(inplace=True)
self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
self.relu1_2 = nn.ReLU(inplace=True)
self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
self.relu2_1 = nn.ReLU(inplace=True)
self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
self.relu2_2 = nn.ReLU(inplace=True)
self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
self.relu3_1 = nn.ReLU(inplace=True)
self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
self.relu3_2 = nn.ReLU(inplace=True)
self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
self.relu3_3 = nn.ReLU(inplace=True)
self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
self.relu4_1 = nn.ReLU(inplace=True)
self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_2 = nn.ReLU(inplace=True)
self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_3 = nn.ReLU(inplace=True)
self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_1 = nn.ReLU(inplace=True)
self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_2 = nn.ReLU(inplace=True)
self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_3 = nn.ReLU(inplace=True)
self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.fc6 = nn.Conv2d(512, 4096, 7)
self.relu6 = nn.ReLU(inplace=True)
self.drop6 = nn.Dropout2d()
self.fc7 = nn.Conv2d(4096, 4096, 1)
self.relu7 = nn.ReLU(inplace=True)
self.drop7 = nn.Dropout2d()
self.score_fr = nn.Conv2d(4096, n_class, 1)
self.score_pool3 = nn.Conv2d(256, n_class, 1)
self.score_pool4 = nn.Conv2d(512, n_class, 1)
self.upscore2 = nn.ConvTranspose2d(n_class, n_class, 4, stride=2,
bias=False)
self.upscore8 = nn.ConvTranspose2d(n_class, n_class, 16, stride=8,
bias=False)
self.upscore_pool4 = nn.ConvTranspose2d(n_class, n_class, 4, stride
=2, bias=False)
self._initialize_weights()
def _initialize_weights(self):
for mod in self.modules():
if isinstance(mod, nn.Conv2d):
mod.weight.data.zero_()
if mod.bias is not None:
mod.bias.data.zero_()
if isinstance(mod, nn.ConvTranspose2d):
m, k, h, w = mod.weight.data.shape
if m != k and k != 1:
raise RuntimeError(
'input + output channels need to be the same or |output| == 1'
)
if h != w:
raise RuntimeError('filters need to be square')
filt = torch.from_numpy(self.upsample_filt(h)).float()
mod.weight.data[range(m), range(k), :, :] = filt
def upsample_filt(self, size):
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
return (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center
) / factor)
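# Worked example: upsample_filt(4) gives factor = 2 and center = 1.5, so the
# 1-D weights are [0.25, 0.75, 0.75, 0.25] and the returned 4x4 filter is
# their outer product -- the standard bilinear-interpolation kernel.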
def forward(self, input_0):
primals_2 = self.conv1_1.weight
primals_3 = self.conv1_1.bias
primals_4 = self.conv1_2.weight
primals_5 = self.conv1_2.bias
primals_6 = self.conv2_1.weight
primals_7 = self.conv2_1.bias
primals_8 = self.conv2_2.weight
primals_9 = self.conv2_2.bias
primals_10 = self.conv3_1.weight
primals_11 = self.conv3_1.bias
primals_12 = self.conv3_2.weight
primals_13 = self.conv3_2.bias
primals_14 = self.conv3_3.weight
primals_15 = self.conv3_3.bias
primals_16 = self.conv4_1.weight
primals_17 = self.conv4_1.bias
primals_18 = self.conv4_2.weight
primals_19 = self.conv4_2.bias
primals_20 = self.conv4_3.weight
primals_21 = self.conv4_3.bias
primals_22 = self.conv5_1.weight
primals_23 = self.conv5_1.bias
primals_24 = self.conv5_2.weight
primals_25 = self.conv5_2.bias
primals_26 = self.conv5_3.weight
primals_27 = self.conv5_3.bias
primals_28 = self.fc6.weight
primals_29 = self.fc6.bias
primals_30 = self.fc7.weight
primals_31 = self.fc7.bias
primals_32 = self.score_fr.weight
primals_33 = self.score_fr.bias
primals_38 = self.score_pool3.weight
primals_36 = self.score_pool3.bias
primals_35 = self.score_pool4.weight
primals_39 = self.score_pool4.bias
primals_34 = self.upscore2.weight
primals_40 = self.upscore8.weight
primals_37 = self.upscore_pool4.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40])
return output[0]
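# A minimal usage sketch (hypothetical; it matches the (4, 3, 64, 64) input
# shape that call() asserts on):
#
#   model = FCN8sNew(n_class=3).cuda()
#   x = torch.randn(4, 3, 64, 64, device='cuda')
#   scores = model(x)  # (4, 3, 64, 64) per-pixel class scores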
| twni2016/OrganSegRSTN_PyTorch | FCN8s | false | 16,856 | ["MIT"] | 100 | bf571320e718c8f138e04d48645e3b4dfe75801d | https://github.com/twni2016/OrganSegRSTN_PyTorch/tree/bf571320e718c8f138e04d48645e3b4dfe75801d
LayoutNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/oy/coywg33tjoxmlqbwwhyvzwfsjs5n7ceqi5tzkvjci3qwulab2iv2.py
# Topologically Sorted Source Nodes: [conv1, conv1_relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv1 => convolution
# conv1_relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
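# For reference, a hedged eager-mode sketch of this fusion: the convolution is
# dispatched to extern_kernels.convolution, and the Triton kernel below only
# fuses the bias add and the ReLU, roughly
#
#   out = torch.relu(conv_out + primals_2.view(1, -1, 1, 1))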
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8388608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 65536) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/bo/cbobvss6c6iqh2cuinv6ecehmjbf3ihhrfb5gfsiif5xta2gtdfo.py
# Topologically Sorted Source Nodes: [pool1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# pool1 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = (xindex // 128)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (512*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (512*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (256 + (2*x0) + (512*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (257 + (2*x0) + (512*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
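# Indexing note for the pooling kernels in this file: with a contiguous input
# row of width 256, the four loads above fetch the 2x2 window at offsets 0, 1,
# 256 (one row down), and 257; the kernel stores the window maximum and an
# int8 code (0..3) recording which tap produced it, for use in the backward
# pass.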
# kernel path: runs/run_shard_0/inductor_cache/fa/cfajt7vh3ahxp6emvz7olqbsg43uzopvztq57g3omsehusmvtego.py
# Topologically Sorted Source Nodes: [conv2, conv2_relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2 => convolution_1
# conv2_relu => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4194304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16384) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/jb/cjblo3k3wgsjkcpv6rqstgy24fpa7gatyfvafzpfh2azprp3t5is.py
# Topologically Sorted Source Nodes: [pool2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# pool2 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = (xindex // 64)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (256*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (256*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (128 + (2*x0) + (256*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (129 + (2*x0) + (256*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/3j/c3j4jkvokwjzyp5aekzyppt7m5e3z6uaeuav42db4ccfjozaasyw.py
# Topologically Sorted Source Nodes: [conv3, conv3_relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv3 => convolution_2
# conv3_relu => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/cl/cclry2h5k76pty6kqbs5iwxj4rd2ojgszpjgdj234z5l2tmywvxz.py
# Topologically Sorted Source Nodes: [pool3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# pool3 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/b4/cb4rafbxurylblcz5jurin6ysseyfihblhiourxkkk3rbybhbhj4.py
# Topologically Sorted Source Nodes: [conv4, conv4_relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv4 => convolution_3
# conv4_relu => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/az/cazafesp4cnkhuqwee57d5uvmoijfpl4z4uq772ps4t2jxpto2tt.py
# Topologically Sorted Source Nodes: [pool4], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# pool4 => getitem_6, getitem_7
# Graph fragment:
# %getitem_6 : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {})
# %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/mc/cmcaufjiuodhizgvps6nh6hglhzidcdegze2d3lhq6j7ew3etpvf.py
# Topologically Sorted Source Nodes: [conv5, conv5_relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv5 => convolution_4
# conv5_relu => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 512
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/sf/csfoayapm3pqgrwbr6tsnvmpq7tz7urardhxq4x7vmqmo62x7zqz.py
# Topologically Sorted Source Nodes: [pool5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# pool5 => getitem_8, getitem_9
# Graph fragment:
# %getitem_8 : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_4, 0), kwargs = {})
# %getitem_9 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_4, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_9 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/oq/coqobqtj55mct5p5rd7wsny6k3kptdo5wralrfj5ikqajjviotdp.py
# Topologically Sorted Source Nodes: [conv6, conv6_relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv6 => convolution_5
# conv6_relu => relu_5
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_8, %primals_12, %primals_13, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
triton_poi_fused_convolution_relu_10 = async_compile.triton('triton_poi_fused_convolution_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 1024
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4g/c4gdhobmlqpduhitdil2o2czatqppl5il2xfdltufptmnlalyvei.py
# Topologically Sorted Source Nodes: [pool6], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# pool6 => getitem_10, getitem_11
# Graph fragment:
# %getitem_10 : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_5, 0), kwargs = {})
# %getitem_11 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_5, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_11 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (9 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
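
# Editor's sketch (assumption): the kernel above implements the 2x2 / stride-2 max
# pool. Instead of flat argmax indices it stores an int8 offset in 0..3 naming the
# winning window position (0 = top-left, 1 = top-right, 2 = bottom-left,
# 3 = bottom-right), which is what the tmp7..tmp16 comparison chain computes.
# Closest eager analogue, with `x` a hypothetical (N, C, 8, 8) input:
def _sketch_pool6(x):
    import torch.nn.functional as F
    values, indices = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)
    return values, indices  # the Triton kernel stores window offsets, not flat indices
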
# kernel path: runs/run_shard_0/inductor_cache/fj/cfjgyjiqvf3abs4dy4txpjcye7ha2yr5cqiefekqzasnsg56hf7r.py
# Topologically Sorted Source Nodes: [conv7, conv7_relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv7 => convolution_6
# conv7_relu => relu_6
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_10, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {})
triton_poi_fused_convolution_relu_12 = async_compile.triton('triton_poi_fused_convolution_relu_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 2048
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/rq/crqgu5pinbxudm7evft6dkladuvsa53mj5myvmwxs565rgay34ic.py
# Topologically Sorted Source Nodes: [pool7], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# pool7 => _low_memory_max_pool2d_with_offsets_6, getitem_13
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_6 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_6, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_13 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_6, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_13 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
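
# Editor's note: this is the same 2x2 max pool as triton_poi_fused_max_pool2d_with_indices_11
# above, with two cosmetic differences: the running max and the offset selection are
# interleaved rather than computed in two passes, and the outputs are swapped, so
# out_ptr0 receives the int8 offsets and out_ptr1 the pooled values (note the
# '*fp32', '*i8', '*fp32' signature order).
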
# kernel path: runs/run_shard_0/inductor_cache/e5/ce54oyubq3rhtie5fig34gtkaoou2t3f2lhxazukjkh3pvuuptgs.py
# Topologically Sorted Source Nodes: [unpool00], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# unpool00 => add, add_1, convert_element_type, convert_element_type_1, iota, mul, mul_1
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota, 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.5), kwargs = {})
# %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_1, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_14 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_14(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
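
# Editor's sketch (assumption): the tiny kernel above materializes the nearest-neighbor
# source-index map floor(i * 0.5), i.e. [0, 0, 1, 1] for the 4-wide output of a 2x
# upsample from a 2-wide input. The same kernel recurs below for output lengths 8, 16,
# 32, 64, 128 and 256. One-line eager equivalent (`out_len` is a hypothetical parameter):
def _sketch_upsample_index(out_len):
    import torch
    return (torch.arange(out_len, dtype=torch.float32) * 0.5).to(torch.int64)
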
# kernel path: runs/run_shard_0/inductor_cache/gj/cgjlr5hhmjr6uhckt4rv2vemlbus4z6czg4q5f7dtlp3aogs3qhq.py
# Topologically Sorted Source Nodes: [pool7, unpool00], Original ATen: [aten.max_pool2d_with_indices, aten._unsafe_index]
# Source node to ATen node mapping:
# pool7 => _low_memory_max_pool2d_with_offsets_6
# unpool00 => _unsafe_index
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_6 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_6, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %_unsafe_index : [num_users=3] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%getitem_12, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {})
triton_poi_fused__unsafe_index_max_pool2d_with_indices_15 = async_compile.triton('triton_poi_fused__unsafe_index_max_pool2d_with_indices_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_max_pool2d_with_indices_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_max_pool2d_with_indices_15(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4) % 4
x0 = xindex % 4
x2 = (xindex // 16)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 2, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + ((2*tmp8) + (8*tmp4) + (16*x2)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + (2*tmp8) + (8*tmp4) + (16*x2)), None, eviction_policy='evict_last')
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tl.load(in_ptr1 + (4 + (2*tmp8) + (8*tmp4) + (16*x2)), None, eviction_policy='evict_last')
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tl.load(in_ptr1 + (5 + (2*tmp8) + (8*tmp4) + (16*x2)), None, eviction_policy='evict_last')
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x4), tmp15, None)
''', device_str='cuda')
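
# Editor's sketch (assumption): rather than writing the 2x2 pooled map (pool7) and
# reading it back, the kernel above recomputes each window max directly at every
# upsampled output location. That is equivalent to pooling and then 2x nearest-neighbor
# upsampling; `x` below is a hypothetical (N, C, 4, 4) stand-in for relu_6.
def _sketch_pool7_unpool00(x):
    import torch.nn.functional as F
    pooled = F.max_pool2d(x, kernel_size=2, stride=2)             # pool7 values
    return F.interpolate(pooled, scale_factor=2, mode='nearest')  # unpool00
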
# kernel path: runs/run_shard_0/inductor_cache/x6/cx6vvy7fqvcsp5c7il3btujal3owv3hc7jbvmoojmcc32vj76vkg.py
# Topologically Sorted Source Nodes: [unpool0], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# unpool0 => add_4, add_5, convert_element_type_4, convert_element_type_5, iota_2, mul_4, mul_5
# Graph fragment:
# %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_2, 1), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 0), kwargs = {})
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_4, torch.float32), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, 0.5), kwargs = {})
# %convert_element_type_5 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_5, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_16 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_16(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/rj/crjwuk2tkmi6gyr7t6a4g2kgnzxdagzq5yydhzub2c2qr6mh5w7u.py
# Topologically Sorted Source Nodes: [unpool0_, unpool0], Original ATen: [aten.cat, aten._unsafe_index]
# Source node to ATen node mapping:
# unpool0 => _unsafe_index_1
# unpool0_ => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_7, %getitem_10], 1), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat, [None, None, %unsqueeze_1, %convert_element_type_5]), kwargs = {})
triton_poi_fused__unsafe_index_cat_17 = async_compile.triton('triton_poi_fused__unsafe_index_cat_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_cat_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_cat_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 8) % 8
x0 = xindex % 8
x2 = (xindex // 64) % 2048
x3 = (xindex // 131072)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tmp9 >= tmp10
tmp12 = tl.full([1], 1024, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + (4*tmp4) + (16*x2) + (16384*x3)), tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + (x2), tmp13, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tmp22 = tl.full([1], 2048, tl.int64)
tmp23 = tmp9 < tmp22
tmp24 = tl.load(in_ptr3 + (tmp8 + (4*tmp4) + (16*((-1024) + x2)) + (16384*x3)), tmp21, eviction_policy='evict_last', other=0.0)
tmp25 = tl.where(tmp13, tmp20, tmp24)
tl.store(out_ptr0 + (x5), tmp25, None)
''', device_str='cuda')
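
# Editor's sketch (assumption): the kernel above fuses three graph nodes: the bias-add
# + ReLU epilogue of the preceding convolution (in_ptr1/in_ptr2, channels 0..1023,
# i.e. relu_7), the channel concatenation with the saved pool6 activations (in_ptr3,
# channels 1024..2047, i.e. getitem_10), and the 2x nearest-neighbor upsample from
# 4x4 to 8x8. Kernels 19, 21, 23, 25 and 27 below repeat this pattern for the later
# decoder stages. All tensor names here are hypothetical stand-ins.
def _sketch_unpool0(conv_raw, conv_bias, pool6):
    import torch
    import torch.nn.functional as F
    skip = F.relu(conv_raw + conv_bias.view(1, -1, 1, 1))         # relu_7
    merged = torch.cat([skip, pool6], dim=1)                      # unpool0_
    return F.interpolate(merged, scale_factor=2, mode='nearest')  # unpool0
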
# kernel path: runs/run_shard_0/inductor_cache/f3/cf3qfh2e5uuz3pswi5dq56ubmdfrmocdzprd6kc7s5dykkgebxwk.py
# Topologically Sorted Source Nodes: [unpool1], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# unpool1 => add_8, add_9, convert_element_type_8, convert_element_type_9, iota_4, mul_8, mul_9
# Graph fragment:
# %iota_4 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_4, 1), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_8, 0), kwargs = {})
# %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_8, torch.float32), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_8, 0.0), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_9, 0.5), kwargs = {})
# %convert_element_type_9 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_9, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_18 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_18(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/zk/czkgjedhzacgvjbu5dbmfq7vzc67fwgwerif7kron435q7vswxy4.py
# Topologically Sorted Source Nodes: [unpool1_, unpool1], Original ATen: [aten.cat, aten._unsafe_index]
# Source node to ATen node mapping:
# unpool1 => _unsafe_index_2
# unpool1_ => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_8, %getitem_8], 1), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat_1, [None, None, %unsqueeze_2, %convert_element_type_9]), kwargs = {})
triton_poi_fused__unsafe_index_cat_19 = async_compile.triton('triton_poi_fused__unsafe_index_cat_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_cat_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_cat_19(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 16
x0 = xindex % 16
x2 = (xindex // 256) % 1024
x3 = (xindex // 262144)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tmp9 >= tmp10
tmp12 = tl.full([1], 512, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + (8*tmp4) + (64*x2) + (32768*x3)), tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + (x2), tmp13, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tmp22 = tl.full([1], 1024, tl.int64)
tmp23 = tmp9 < tmp22
tmp24 = tl.load(in_ptr3 + (tmp8 + (8*tmp4) + (64*((-512) + x2)) + (32768*x3)), tmp21, eviction_policy='evict_last', other=0.0)
tmp25 = tl.where(tmp13, tmp20, tmp24)
tl.store(out_ptr0 + (x5), tmp25, None)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/wg/cwgxtc4y3ifd3t7fprczhrjtdxlfclh54p7dwvlgwkaet2cxhnox.py
# Topologically Sorted Source Nodes: [unpool2], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# unpool2 => add_12, add_13, convert_element_type_12, convert_element_type_13, iota_6, mul_12, mul_13
# Graph fragment:
# %iota_6 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (32,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_6, 1), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_12, 0), kwargs = {})
# %convert_element_type_12 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_12, torch.float32), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_12, 0.0), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_13, 0.5), kwargs = {})
# %convert_element_type_13 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_13, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_20 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_20(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/qj/cqjv7x3j5xpjqa4dtjzjbnjiqxu7rm6syjwhgnf7k4tofcjtgee3.py
# Topologically Sorted Source Nodes: [unpool2_, unpool2], Original ATen: [aten.cat, aten._unsafe_index]
# Source node to ATen node mapping:
# unpool2 => _unsafe_index_3
# unpool2_ => cat_2
# Graph fragment:
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_9, %getitem_6], 1), kwargs = {})
# %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat_2, [None, None, %unsqueeze_3, %convert_element_type_13]), kwargs = {})
triton_poi_fused__unsafe_index_cat_21 = async_compile.triton('triton_poi_fused__unsafe_index_cat_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_cat_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_cat_21(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 32) % 32
x0 = xindex % 32
x2 = (xindex // 1024) % 512
x3 = (xindex // 524288)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tmp9 >= tmp10
tmp12 = tl.full([1], 256, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + (16*tmp4) + (256*x2) + (65536*x3)), tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + (x2), tmp13, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tmp22 = tl.full([1], 512, tl.int64)
tmp23 = tmp9 < tmp22
tmp24 = tl.load(in_ptr3 + (tmp8 + (16*tmp4) + (256*((-256) + x2)) + (65536*x3)), tmp21, eviction_policy='evict_last', other=0.0)
tmp25 = tl.where(tmp13, tmp20, tmp24)
tl.store(out_ptr0 + (x5), tmp25, None)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/xu/cxu7kgbkohaiwpo3ugwf6okihdg5vvrhsz2wkz4ds7uhbdmehgwn.py
# Topologically Sorted Source Nodes: [unpool3], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# unpool3 => add_16, add_17, convert_element_type_16, convert_element_type_17, iota_8, mul_16, mul_17
# Graph fragment:
# %iota_8 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_8, 1), kwargs = {})
# %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_16, 0), kwargs = {})
# %convert_element_type_16 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_16, torch.float32), kwargs = {})
# %add_17 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_16, 0.0), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_17, 0.5), kwargs = {})
# %convert_element_type_17 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_17, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_22 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_22(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/f5/cf5nh6etasklywv4gmc7lyc5qtmlvsrmrhnghmywmf3xgz6snhil.py
# Topologically Sorted Source Nodes: [unpool3_, unpool3], Original ATen: [aten.cat, aten._unsafe_index]
# Source node to ATen node mapping:
# unpool3 => _unsafe_index_4
# unpool3_ => cat_3
# Graph fragment:
# %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_10, %getitem_4], 1), kwargs = {})
# %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat_3, [None, None, %unsqueeze_4, %convert_element_type_17]), kwargs = {})
triton_poi_fused__unsafe_index_cat_23 = async_compile.triton('triton_poi_fused__unsafe_index_cat_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_cat_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_cat_23(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4194304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 64
x0 = xindex % 64
x2 = (xindex // 4096) % 256
x3 = (xindex // 1048576)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tmp9 >= tmp10
tmp12 = tl.full([1], 128, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + (32*tmp4) + (1024*x2) + (131072*x3)), tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + (x2), tmp13, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tmp22 = tl.full([1], 256, tl.int64)
tmp23 = tmp9 < tmp22
tmp24 = tl.load(in_ptr3 + (tmp8 + (32*tmp4) + (1024*((-128) + x2)) + (131072*x3)), tmp21, eviction_policy='evict_last', other=0.0)
tmp25 = tl.where(tmp13, tmp20, tmp24)
tl.store(out_ptr0 + (x5), tmp25, None)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/3g/c3gdln3s24m4go5ettvcp3nzg4evqwtm7aqyhr6mapn7etrrhbbq.py
# Topologically Sorted Source Nodes: [unpool4], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# unpool4 => add_20, add_21, convert_element_type_20, convert_element_type_21, iota_10, mul_20, mul_21
# Graph fragment:
# %iota_10 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (128,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_10, 1), kwargs = {})
# %add_20 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_20, 0), kwargs = {})
# %convert_element_type_20 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_20, torch.float32), kwargs = {})
# %add_21 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_20, 0.0), kwargs = {})
# %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_21, 0.5), kwargs = {})
# %convert_element_type_21 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_21, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_24 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_24', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_24(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/x5/cx5y5iiathk2hobaptmy6kypg4bi4alfd2vi2r7bwcznokfncawg.py
# Topologically Sorted Source Nodes: [unpool4_, unpool4], Original ATen: [aten.cat, aten._unsafe_index]
# Source node to ATen node mapping:
# unpool4 => _unsafe_index_5
# unpool4_ => cat_4
# Graph fragment:
# %cat_4 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_11, %getitem_2], 1), kwargs = {})
# %_unsafe_index_5 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat_4, [None, None, %unsqueeze_5, %convert_element_type_21]), kwargs = {})
triton_poi_fused__unsafe_index_cat_25 = async_compile.triton('triton_poi_fused__unsafe_index_cat_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_cat_25', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_cat_25(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8388608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 128) % 128
x0 = xindex % 128
x2 = (xindex // 16384) % 128
x3 = (xindex // 2097152)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 64, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tmp9 >= tmp10
tmp12 = tl.full([1], 64, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + (64*tmp4) + (4096*x2) + (262144*x3)), tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + (x2), tmp13, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tmp22 = tl.full([1], 128, tl.int64)
tmp23 = tmp9 < tmp22
tmp24 = tl.load(in_ptr3 + (tmp8 + (64*tmp4) + (4096*((-64) + x2)) + (262144*x3)), tmp21, eviction_policy='evict_last', other=0.0)
tmp25 = tl.where(tmp13, tmp20, tmp24)
tl.store(out_ptr0 + (x5), tmp25, None)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/mg/cmgdn5agbl4ghrm4l2vv3mztbbpp5z4xpga37iqfh722we6zh6z2.py
# Topologically Sorted Source Nodes: [unpool5], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# unpool5 => add_24, add_25, convert_element_type_24, convert_element_type_25, iota_12, mul_24, mul_25
# Graph fragment:
# %iota_12 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (256,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_12, 1), kwargs = {})
# %add_24 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_24, 0), kwargs = {})
# %convert_element_type_24 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_24, torch.float32), kwargs = {})
# %add_25 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_24, 0.0), kwargs = {})
# %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_25, 0.5), kwargs = {})
# %convert_element_type_25 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_25, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_26 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_26', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_26', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_26(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/gw/cgwkuqzp73uufaj6hkqcz6wcup65ht5gppww5pam2bhwn6gjfulu.py
# Topologically Sorted Source Nodes: [unpool5_, unpool5], Original ATen: [aten.cat, aten._unsafe_index]
# Source node to ATen node mapping:
# unpool5 => _unsafe_index_6
# unpool5_ => cat_5
# Graph fragment:
# %cat_5 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_12, %getitem], 1), kwargs = {})
# %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat_5, [None, None, %unsqueeze_6, %convert_element_type_25]), kwargs = {})
triton_poi_fused__unsafe_index_cat_27 = async_compile.triton('triton_poi_fused__unsafe_index_cat_27', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16777216],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_cat_27', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_cat_27(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16777216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 256) % 256
x0 = xindex % 256
x2 = (xindex // 65536) % 64
x3 = (xindex // 4194304)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 128, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tmp9 >= tmp10
tmp12 = tl.full([1], 32, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + (128*tmp4) + (16384*x2) + (524288*x3)), tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + (x2), tmp13, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tmp22 = tl.full([1], 64, tl.int64)
tmp23 = tmp9 < tmp22
tmp24 = tl.load(in_ptr3 + (tmp8 + (128*tmp4) + (16384*((-32) + x2)) + (524288*x3)), tmp21, eviction_policy='evict_last', other=0.0)
tmp25 = tl.where(tmp13, tmp20, tmp24)
tl.store(out_ptr0 + (x5), tmp25, None)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/dr/cdrgiyexaf7sbklmdcjz3uq4la3rw2rlhstdvnieezyhbppev5y6.py
# Topologically Sorted Source Nodes: [deconv5, deconv6_sf], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# deconv5 => convolution_13
# deconv6_sf => sigmoid
# Graph fragment:
# %convolution_13 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_6, %primals_28, %primals_29, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_13,), kwargs = {})
triton_poi_fused_convolution_sigmoid_28 = async_compile.triton('triton_poi_fused_convolution_sigmoid_28', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_28', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_28(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 786432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 65536) % 3
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x3), tmp3, None)
''', device_str='cuda')
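
# Only the bias add and sigmoid are fused here; the convolution itself runs via
# extern_kernels.convolution and this kernel rewrites its output buffer in
# place (hence in_out_ptr0 in mutated_arg_names). A hedged eager-mode
# equivalent, with illustrative argument names, defined but never called:
def _sketch_conv_sigmoid_epilogue(conv_out_no_bias, bias):
    return torch.sigmoid(conv_out_no_bias + bias.view(1, -1, 1, 1))
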
# kernel path: runs/run_shard_0/inductor_cache/p7/cp7grftbfimeho4vqsjjsfpbgc7abomfgiek5ghsgvqzyipedfsz.py
# Topologically Sorted Source Nodes: [unpool0_c, unpool0_c_1], Original ATen: [aten.cat, aten._unsafe_index]
# Source node to ATen node mapping:
# unpool0_c => cat_6
# unpool0_c_1 => _unsafe_index_7
# Graph fragment:
# %cat_6 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_13, %cat], 1), kwargs = {})
# %_unsafe_index_7 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat_6, [None, None, %unsqueeze_1, %convert_element_type_5]), kwargs = {})
triton_poi_fused__unsafe_index_cat_29 = async_compile.triton('triton_poi_fused__unsafe_index_cat_29', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_cat_29', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_cat_29(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 786432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 8) % 8
x0 = xindex % 8
x2 = (xindex // 64) % 3072
x3 = (xindex // 196608)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tmp9 >= tmp10
tmp12 = tl.full([1], 1024, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + (4*tmp4) + (16*x2) + (16384*x3)), tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + (x2), tmp13, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tmp22 = tl.full([1], 3072, tl.int64)
tmp23 = tmp9 < tmp22
tmp24 = (-1024) + x2
tmp25 = tmp24 >= tmp10
tmp26 = tmp24 < tmp12
tmp27 = tmp26 & tmp21
tmp28 = tl.load(in_ptr3 + (tmp8 + (4*tmp4) + (16*((-1024) + x2)) + (16384*x3)), tmp27, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr4 + ((-1024) + x2), tmp27, eviction_policy='evict_last', other=0.0)
tmp30 = tmp28 + tmp29
tmp31 = triton_helpers.maximum(tmp17, tmp30)
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp27, tmp31, tmp32)
tmp34 = tmp24 >= tmp12
tmp35 = tl.full([1], 2048, tl.int64)
tmp36 = tmp24 < tmp35
tmp37 = tmp34 & tmp21
tmp38 = tl.load(in_ptr5 + (tmp8 + (4*tmp4) + (16*((-1024) + ((-1024) + x2))) + (16384*x3)), tmp37, eviction_policy='evict_last', other=0.0)
tmp39 = tl.where(tmp26, tmp33, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp21, tmp39, tmp40)
tmp42 = tl.where(tmp13, tmp20, tmp41)
tl.store(out_ptr0 + (x5), tmp42, None)
''', device_str='cuda')
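
# The tail of the kernel above is a branchless three-way select: cat_6 stacks
# 1024 + 1024 + 1024 channels, and the nested tl.where chain picks the right
# source per channel index, applying the bias + ReLU epilogue only on the
# branches that need it. Kernels 30-34 below repeat the same pattern at other
# spatial scales and channel splits. A scalar sketch of the selection logic
# (illustrative only; n is the per-branch channel count):
def _sketch_three_way_cat_select(c, a_val, b_val, d_val, n=1024):
    if c < n:          # mask tmp13: first cat branch
        return a_val
    elif c < 2 * n:    # mask tmp27 = (0 <= c - n < n) & (c >= n): middle branch
        return b_val
    else:              # mask tmp37: tail branch
        return d_val
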
# kernel path: runs/run_shard_0/inductor_cache/xo/cxo6v4r3rntqpluye2ib5xlcff2aybmyoi4k42njpoejrrh7ey4u.py
# Topologically Sorted Source Nodes: [unpool1_c, unpool1_c_1], Original ATen: [aten.cat, aten._unsafe_index]
# Source node to ATen node mapping:
# unpool1_c => cat_7
# unpool1_c_1 => _unsafe_index_8
# Graph fragment:
# %cat_7 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_14, %cat_1], 1), kwargs = {})
# %_unsafe_index_8 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat_7, [None, None, %unsqueeze_2, %convert_element_type_9]), kwargs = {})
triton_poi_fused__unsafe_index_cat_30 = async_compile.triton('triton_poi_fused__unsafe_index_cat_30', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_cat_30', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_cat_30(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1572864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 16
x0 = xindex % 16
x2 = (xindex // 256) % 1536
x3 = (xindex // 393216)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tmp9 >= tmp10
tmp12 = tl.full([1], 512, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + (8*tmp4) + (64*x2) + (32768*x3)), tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + (x2), tmp13, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tmp22 = tl.full([1], 1536, tl.int64)
tmp23 = tmp9 < tmp22
tmp24 = (-512) + x2
tmp25 = tmp24 >= tmp10
tmp26 = tmp24 < tmp12
tmp27 = tmp26 & tmp21
tmp28 = tl.load(in_ptr3 + (tmp8 + (8*tmp4) + (64*((-512) + x2)) + (32768*x3)), tmp27, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr4 + ((-512) + x2), tmp27, eviction_policy='evict_last', other=0.0)
tmp30 = tmp28 + tmp29
tmp31 = triton_helpers.maximum(tmp17, tmp30)
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp27, tmp31, tmp32)
tmp34 = tmp24 >= tmp12
tmp35 = tl.full([1], 1024, tl.int64)
tmp36 = tmp24 < tmp35
tmp37 = tmp34 & tmp21
tmp38 = tl.load(in_ptr5 + (tmp8 + (8*tmp4) + (64*((-512) + ((-512) + x2))) + (32768*x3)), tmp37, eviction_policy='evict_last', other=0.0)
tmp39 = tl.where(tmp26, tmp33, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp21, tmp39, tmp40)
tmp42 = tl.where(tmp13, tmp20, tmp41)
tl.store(out_ptr0 + (x5), tmp42, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5c/c5cp64uayaohzh7ivqsruxk3ue2ztm4h2noktk5322csgzpqyrm7.py
# Topologically Sorted Source Nodes: [unpool2_c, unpool2_c_1], Original ATen: [aten.cat, aten._unsafe_index]
# Source node to ATen node mapping:
# unpool2_c => cat_8
# unpool2_c_1 => _unsafe_index_9
# Graph fragment:
# %cat_8 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_15, %cat_2], 1), kwargs = {})
# %_unsafe_index_9 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat_8, [None, None, %unsqueeze_3, %convert_element_type_13]), kwargs = {})
triton_poi_fused__unsafe_index_cat_31 = async_compile.triton('triton_poi_fused__unsafe_index_cat_31', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_cat_31', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_cat_31(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3145728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 32) % 32
x0 = xindex % 32
x2 = (xindex // 1024) % 768
x3 = (xindex // 786432)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tmp9 >= tmp10
tmp12 = tl.full([1], 256, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + (16*tmp4) + (256*x2) + (65536*x3)), tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + (x2), tmp13, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tmp22 = tl.full([1], 768, tl.int64)
tmp23 = tmp9 < tmp22
tmp24 = (-256) + x2
tmp25 = tmp24 >= tmp10
tmp26 = tmp24 < tmp12
tmp27 = tmp26 & tmp21
tmp28 = tl.load(in_ptr3 + (tmp8 + (16*tmp4) + (256*((-256) + x2)) + (65536*x3)), tmp27, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr4 + ((-256) + x2), tmp27, eviction_policy='evict_last', other=0.0)
tmp30 = tmp28 + tmp29
tmp31 = triton_helpers.maximum(tmp17, tmp30)
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp27, tmp31, tmp32)
tmp34 = tmp24 >= tmp12
tmp35 = tl.full([1], 512, tl.int64)
tmp36 = tmp24 < tmp35
tmp37 = tmp34 & tmp21
tmp38 = tl.load(in_ptr5 + (tmp8 + (16*tmp4) + (256*((-256) + ((-256) + x2))) + (65536*x3)), tmp37, eviction_policy='evict_last', other=0.0)
tmp39 = tl.where(tmp26, tmp33, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp21, tmp39, tmp40)
tmp42 = tl.where(tmp13, tmp20, tmp41)
tl.store(out_ptr0 + (x5), tmp42, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qg/cqgfqadrva5m44vv47bh74mkiz3qlpkflanqioe36iwzwd54kwqu.py
# Topologically Sorted Source Nodes: [unpool3_c, unpool3_c_1], Original ATen: [aten.cat, aten._unsafe_index]
# Source node to ATen node mapping:
# unpool3_c => cat_9
# unpool3_c_1 => _unsafe_index_10
# Graph fragment:
# %cat_9 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_16, %cat_3], 1), kwargs = {})
# %_unsafe_index_10 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat_9, [None, None, %unsqueeze_4, %convert_element_type_17]), kwargs = {})
triton_poi_fused__unsafe_index_cat_32 = async_compile.triton('triton_poi_fused__unsafe_index_cat_32', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_cat_32', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_cat_32(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6291456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 64
x0 = xindex % 64
x2 = (xindex // 4096) % 384
x3 = (xindex // 1572864)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tmp9 >= tmp10
tmp12 = tl.full([1], 128, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + (32*tmp4) + (1024*x2) + (131072*x3)), tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + (x2), tmp13, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tmp22 = tl.full([1], 384, tl.int64)
tmp23 = tmp9 < tmp22
tmp24 = (-128) + x2
tmp25 = tmp24 >= tmp10
tmp26 = tmp24 < tmp12
tmp27 = tmp26 & tmp21
tmp28 = tl.load(in_ptr3 + (tmp8 + (32*tmp4) + (1024*((-128) + x2)) + (131072*x3)), tmp27, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr4 + ((-128) + x2), tmp27, eviction_policy='evict_last', other=0.0)
tmp30 = tmp28 + tmp29
tmp31 = triton_helpers.maximum(tmp17, tmp30)
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp27, tmp31, tmp32)
tmp34 = tmp24 >= tmp12
tmp35 = tl.full([1], 256, tl.int64)
tmp36 = tmp24 < tmp35
tmp37 = tmp34 & tmp21
tmp38 = tl.load(in_ptr5 + (tmp8 + (32*tmp4) + (1024*((-128) + ((-128) + x2))) + (131072*x3)), tmp37, eviction_policy='evict_last', other=0.0)
tmp39 = tl.where(tmp26, tmp33, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp21, tmp39, tmp40)
tmp42 = tl.where(tmp13, tmp20, tmp41)
tl.store(out_ptr0 + (x5), tmp42, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gi/cgijr7ita2qvxn6h7t45l63nfmz32ywuip5l4rpxjp4jsyrowcsk.py
# Topologically Sorted Source Nodes: [unpool4_c, unpool4_c_1], Original ATen: [aten.cat, aten._unsafe_index]
# Source node to ATen node mapping:
# unpool4_c => cat_10
# unpool4_c_1 => _unsafe_index_11
# Graph fragment:
# %cat_10 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_17, %cat_4], 1), kwargs = {})
# %_unsafe_index_11 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat_10, [None, None, %unsqueeze_5, %convert_element_type_21]), kwargs = {})
triton_poi_fused__unsafe_index_cat_33 = async_compile.triton('triton_poi_fused__unsafe_index_cat_33', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16777216],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_cat_33', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_cat_33(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12582912
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 128) % 128
x0 = xindex % 128
x2 = (xindex // 16384) % 192
x3 = (xindex // 3145728)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 64, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tmp9 >= tmp10
tmp12 = tl.full([1], 64, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + (64*tmp4) + (4096*x2) + (262144*x3)), tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + (x2), tmp13, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tmp22 = tl.full([1], 192, tl.int64)
tmp23 = tmp9 < tmp22
tmp24 = (-64) + x2
tmp25 = tmp24 >= tmp10
tmp26 = tmp24 < tmp12
tmp27 = tmp26 & tmp21
tmp28 = tl.load(in_ptr3 + (tmp8 + (64*tmp4) + (4096*((-64) + x2)) + (262144*x3)), tmp27, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr4 + ((-64) + x2), tmp27, eviction_policy='evict_last', other=0.0)
tmp30 = tmp28 + tmp29
tmp31 = triton_helpers.maximum(tmp17, tmp30)
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp27, tmp31, tmp32)
tmp34 = tmp24 >= tmp12
tmp35 = tl.full([1], 128, tl.int64)
tmp36 = tmp24 < tmp35
tmp37 = tmp34 & tmp21
tmp38 = tl.load(in_ptr5 + (tmp8 + (64*tmp4) + (4096*((-64) + ((-64) + x2))) + (262144*x3)), tmp37, eviction_policy='evict_last', other=0.0)
tmp39 = tl.where(tmp26, tmp33, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp21, tmp39, tmp40)
tmp42 = tl.where(tmp13, tmp20, tmp41)
tl.store(out_ptr0 + (x5), tmp42, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7k/c7kebx7v4i4jrafuhse2kwyxtmayerb3xz4sfh25fyrxdwrgwbb6.py
# Topologically Sorted Source Nodes: [unpool5_c, unpool5_c_1], Original ATen: [aten.cat, aten._unsafe_index]
# Source node to ATen node mapping:
# unpool5_c => cat_11
# unpool5_c_1 => _unsafe_index_12
# Graph fragment:
# %cat_11 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_18, %cat_5], 1), kwargs = {})
# %_unsafe_index_12 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat_11, [None, None, %unsqueeze_6, %convert_element_type_25]), kwargs = {})
triton_poi_fused__unsafe_index_cat_34 = async_compile.triton('triton_poi_fused__unsafe_index_cat_34', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[33554432],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_cat_34', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_cat_34(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 25165824
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 256) % 256
x0 = xindex % 256
x2 = (xindex // 65536) % 96
x3 = (xindex // 6291456)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 128, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tmp9 >= tmp10
tmp12 = tl.full([1], 32, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + (128*tmp4) + (16384*x2) + (524288*x3)), tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + (x2), tmp13, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tmp22 = tl.full([1], 96, tl.int64)
tmp23 = tmp9 < tmp22
tmp24 = (-32) + x2
tmp25 = tmp24 >= tmp10
tmp26 = tmp24 < tmp12
tmp27 = tmp26 & tmp21
tmp28 = tl.load(in_ptr3 + (tmp8 + (128*tmp4) + (16384*((-32) + x2)) + (524288*x3)), tmp27, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr4 + ((-32) + x2), tmp27, eviction_policy='evict_last', other=0.0)
tmp30 = tmp28 + tmp29
tmp31 = triton_helpers.maximum(tmp17, tmp30)
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp27, tmp31, tmp32)
tmp34 = tmp24 >= tmp12
tmp35 = tl.full([1], 64, tl.int64)
tmp36 = tmp24 < tmp35
tmp37 = tmp34 & tmp21
tmp38 = tl.load(in_ptr5 + (tmp8 + (128*tmp4) + (16384*((-32) + ((-32) + x2))) + (524288*x3)), tmp37, eviction_policy='evict_last', other=0.0)
tmp39 = tl.where(tmp26, tmp33, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp21, tmp39, tmp40)
tmp42 = tl.where(tmp13, tmp20, tmp41)
tl.store(out_ptr0 + (x5), tmp42, None)
''', device_str='cuda')
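
# Every _unsafe_index kernel above normalizes its gather indices the same way
# before use: a possibly-negative index i into an axis of length size is
# wrapped via where(i < 0, i + size, i), matching Python's negative indexing
# convention. Scalar sketch of that normalization (reference only):
def _sketch_wrap_index(i, size):
    return i + size if i < 0 else i
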
# kernel path: runs/run_shard_0/inductor_cache/7k/c7khnhvxxxs5amdbri56fzgrnbgvky72bqgtin3aohzrff5vtnd5.py
# Topologically Sorted Source Nodes: [deconv5_c, deconv6_sf_c], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# deconv5_c => convolution_20
# deconv6_sf_c => sigmoid_1
# Graph fragment:
# %convolution_20 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_12, %primals_42, %primals_43, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_20,), kwargs = {})
triton_poi_fused_convolution_sigmoid_35 = async_compile.triton('triton_poi_fused_convolution_sigmoid_35', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_35', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_35(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4194304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 65536) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x3), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/3n/c3n4u2tky6pjdfrl3skxj5ppma6jfop3gkyu55cljd4bewxdzj6e.py
# Topologically Sorted Source Nodes: [ref1_relu], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# ref1_relu => relu_19
# Graph fragment:
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_45), kwargs = {})
# %relu_19 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
triton_poi_fused_relu_36 = async_compile.triton('triton_poi_fused_relu_36', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_36', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_36(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
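
# The matrix multiply feeding this kernel runs as an extern mm (mm_default_2);
# only the bias add and ReLU are fused here, and kernels 37 and 38 below
# repeat the pattern for the smaller refinement layers. Hedged eager-mode
# equivalent, with illustrative names, defined but never called:
def _sketch_linear_relu(x, weight, bias):
    import torch.nn.functional as F
    return F.relu(F.linear(x, weight, bias))
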
# kernel path: runs/run_shard_0/inductor_cache/zu/czuaextbc2o7kp4cvbu4sx6v2wzp4so2pec74cdpjc3bkmxwurm5.py
# Topologically Sorted Source Nodes: [ref2_relu], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# ref2_relu => relu_20
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_47), kwargs = {})
# %relu_20 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_37 = async_compile.triton('triton_poi_fused_relu_37', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_37', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_37(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rl/crltevqtx5c2ktt57lvvk2a3ffbngjydke6s22tm6iwzzpzgx6zs.py
# Topologically Sorted Source Nodes: [ref3_relu], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# ref3_relu => relu_21
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_49), kwargs = {})
# %relu_21 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_38 = async_compile.triton('triton_poi_fused_relu_38', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_38', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_38(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ba/cbany7jlvvh7qxghgvm3ukq5d4gvvmbufltj7gp33cah2u3gi3lh.py
# Topologically Sorted Source Nodes: [deconv4_c, deconv4_relu_c], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# deconv4_c => convolution_19
# deconv4_relu_c => relu_18
# Graph fragment:
# %convolution_19 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_11, %primals_40, %primals_41, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_18 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_19,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_18, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_39 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_39', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_39', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_39(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16384) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
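
# Besides recomputing the forward bias + ReLU, the kernel above emits the
# boolean mask relu(y) <= 0 that aten.threshold_backward consumes, so the
# backward pass can zero gradients without retaining the pre-activation tensor.
# Kernels 40-44 below repeat the pattern once per decoder stage. A hedged
# sketch of how such a mask is used in the backward, with illustrative names:
def _sketch_relu_backward(grad_out, le_mask):
    return grad_out.masked_fill(le_mask, 0.0)
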
# kernel path: runs/run_shard_0/inductor_cache/db/cdbhiy2qrh5wlzjhzkqqsxxye6twqzhz6lb2brl7tgiemwef3fp2.py
# Topologically Sorted Source Nodes: [deconv3_c, deconv3_relu_c], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# deconv3_c => convolution_18
# deconv3_relu_c => relu_17
# Graph fragment:
# %convolution_18 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_10, %primals_38, %primals_39, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_17 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_18,), kwargs = {})
# %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_17, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_40 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_40', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_40', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_40(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/he/chelcabnso7za3rb7mnwtwgwtdudnpdgeod33kqfcpk2zob6mc57.py
# Topologically Sorted Source Nodes: [deconv2_c, deconv2_relu_c], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# deconv2_c => convolution_17
# deconv2_relu_c => relu_16
# Graph fragment:
# %convolution_17 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_9, %primals_36, %primals_37, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_16 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_17,), kwargs = {})
# %le_5 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_16, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_41 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_41', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_41', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_41(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/nc/cncod4iym72nzpcvfyz6agclz4f4bhv4bqtc54nf2mfn5p37zmfv.py
# Topologically Sorted Source Nodes: [deconv1_c, deconv1_relu_c], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# deconv1_c => convolution_16
# deconv1_relu_c => relu_15
# Graph fragment:
# %convolution_16 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_8, %primals_34, %primals_35, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_15 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_16,), kwargs = {})
# %le_6 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_15, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_42 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_42', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_42', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_42(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ou/couxqau75w2x7yddcdzwzbzhfqphwm4wmglmhmwrrtpfbkdn2a2e.py
# Topologically Sorted Source Nodes: [deconv0_c, deconv0_relu_c], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# deconv0_c => convolution_15
# deconv0_relu_c => relu_14
# Graph fragment:
# %convolution_15 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_7, %primals_32, %primals_33, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_14 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_15,), kwargs = {})
# %le_7 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_14, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_43 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_43', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_43', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_43(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 512
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4f/c4fkvtlxvdagl6pngspcsfycarldh45wpp3m5gyejpgibkkbtufa.py
# Topologically Sorted Source Nodes: [deconv00_c, deconv00_relu_c], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# deconv00_c => convolution_14
# deconv00_relu_c => relu_13
# Graph fragment:
# %convolution_14 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_30, %primals_31, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_13 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_14,), kwargs = {})
# %le_8 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_13, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_44 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_44', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_44', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_44(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 1024
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
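
# With all kernel sources registered, async_compile.wait below blocks until
# their background compilation finishes. Each pointwise kernel is then launched
# inside call() roughly as in this hedged sketch, which mirrors the real launch
# sites (e.g. the triton_poi_fused_convolution_relu_0.run call further down);
# the names here are illustrative and the function is never invoked:
def _sketch_launch(kernel, tensor_args, xnumel):
    kernel.run(*tensor_args, xnumel, grid=grid(xnumel), stream=get_raw_stream(0))
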
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51 = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 3, 256, 256), (196608, 65536, 256, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (512, ), (1, ))
assert_size_stride(primals_12, (1024, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_13, (1024, ), (1, ))
assert_size_stride(primals_14, (2048, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_15, (2048, ), (1, ))
assert_size_stride(primals_16, (1024, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_17, (1024, ), (1, ))
assert_size_stride(primals_18, (512, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_19, (512, ), (1, ))
assert_size_stride(primals_20, (256, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_21, (256, ), (1, ))
assert_size_stride(primals_22, (128, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (128, ), (1, ))
assert_size_stride(primals_24, (64, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_25, (64, ), (1, ))
assert_size_stride(primals_26, (32, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_27, (32, ), (1, ))
assert_size_stride(primals_28, (3, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_29, (3, ), (1, ))
assert_size_stride(primals_30, (1024, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_31, (1024, ), (1, ))
assert_size_stride(primals_32, (512, 3072, 3, 3), (27648, 9, 3, 1))
assert_size_stride(primals_33, (512, ), (1, ))
assert_size_stride(primals_34, (256, 1536, 3, 3), (13824, 9, 3, 1))
assert_size_stride(primals_35, (256, ), (1, ))
assert_size_stride(primals_36, (128, 768, 3, 3), (6912, 9, 3, 1))
assert_size_stride(primals_37, (128, ), (1, ))
assert_size_stride(primals_38, (64, 384, 3, 3), (3456, 9, 3, 1))
assert_size_stride(primals_39, (64, ), (1, ))
assert_size_stride(primals_40, (32, 192, 3, 3), (1728, 9, 3, 1))
assert_size_stride(primals_41, (32, ), (1, ))
assert_size_stride(primals_42, (16, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_43, (16, ), (1, ))
assert_size_stride(primals_44, (1024, 32768), (32768, 1))
assert_size_stride(primals_45, (1024, ), (1, ))
assert_size_stride(primals_46, (256, 1024), (1024, 1))
assert_size_stride(primals_47, (256, ), (1, ))
assert_size_stride(primals_48, (64, 256), (256, 1))
assert_size_stride(primals_49, (64, ), (1, ))
assert_size_stride(primals_50, (11, 64), (64, 1))
assert_size_stride(primals_51, (11, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 256, 256), (2097152, 65536, 256, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv1, conv1_relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 8388608, grid=grid(8388608), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 32, 128, 128), (524288, 16384, 128, 1), torch.float32)
buf3 = empty_strided_cuda((4, 32, 128, 128), (524288, 16384, 128, 1), torch.int8)
# Topologically Sorted Source Nodes: [pool1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 2097152, grid=grid(2097152), stream=stream0)
# Topologically Sorted Source Nodes: [conv2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 128, 128), (1048576, 16384, 128, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2, conv2_relu], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 4194304, grid=grid(4194304), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
buf7 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.int8)
# Topologically Sorted Source Nodes: [pool2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 1048576, grid=grid(1048576), stream=stream0)
# Topologically Sorted Source Nodes: [conv3], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 64, 64), (524288, 4096, 64, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [conv3, conv3_relu], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf9, primals_7, 2097152, grid=grid(2097152), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.float32)
buf11 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [pool3], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf9, buf10, buf11, 524288, grid=grid(524288), stream=stream0)
# Topologically Sorted Source Nodes: [conv4], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 32, 32), (262144, 1024, 32, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [conv4, conv4_relu], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf13, primals_9, 1048576, grid=grid(1048576), stream=stream0)
del primals_9
buf14 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.float32)
buf15 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [pool4], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_7.run(buf13, buf14, buf15, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [conv5], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf14, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 512, 16, 16), (131072, 256, 16, 1))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [conv5, conv5_relu], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf17, primals_11, 524288, grid=grid(524288), stream=stream0)
del primals_11
buf18 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.float32)
buf19 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.int8)
# Topologically Sorted Source Nodes: [pool5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_9.run(buf17, buf18, buf19, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv6], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 1024, 8, 8), (65536, 64, 8, 1))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [conv6, conv6_relu], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_10.run(buf21, primals_13, 262144, grid=grid(262144), stream=stream0)
del primals_13
buf22 = empty_strided_cuda((4, 1024, 4, 4), (16384, 16, 4, 1), torch.float32)
buf23 = empty_strided_cuda((4, 1024, 4, 4), (16384, 16, 4, 1), torch.int8)
# Topologically Sorted Source Nodes: [pool6], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_11.run(buf21, buf22, buf23, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [conv7], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 2048, 4, 4), (32768, 16, 4, 1))
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [conv7, conv7_relu], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_12.run(buf25, primals_15, 131072, grid=grid(131072), stream=stream0)
del primals_15
buf26 = empty_strided_cuda((4, 2048, 2, 2), (8192, 4, 2, 1), torch.int8)
buf63 = empty_strided_cuda((4, 2048, 2, 2), (8192, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [pool7], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_13.run(buf25, buf26, buf63, 32768, grid=grid(32768), stream=stream0)
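        # Unlike pools 1-6, this kernel stores the int8 argmax codes in buf26 and
        # the pooled values in buf63; buf63 is later flattened to (1, 32768) for
        # the ref head, while buf26 is only returned for the backward pass.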
buf27 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [unpool00], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_14.run(buf27, 4, grid=grid(4), stream=stream0)
buf28 = empty_strided_cuda((4, 2048, 4, 4), (32768, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pool7, unpool00], Original ATen: [aten.max_pool2d_with_indices, aten._unsafe_index]
triton_poi_fused__unsafe_index_max_pool2d_with_indices_15.run(buf27, buf25, buf28, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [deconv00], Original ATen: [aten.convolution]
buf29 = extern_kernels.convolution(buf28, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf29, (4, 1024, 4, 4), (16384, 16, 4, 1))
buf30 = empty_strided_cuda((8, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [unpool0], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_16.run(buf30, 8, grid=grid(8), stream=stream0)
buf31 = empty_strided_cuda((4, 2048, 8, 8), (131072, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [unpool0_, unpool0], Original ATen: [aten.cat, aten._unsafe_index]
triton_poi_fused__unsafe_index_cat_17.run(buf30, buf29, primals_17, buf22, buf31, 524288, grid=grid(524288), stream=stream0)
# Topologically Sorted Source Nodes: [deconv0], Original ATen: [aten.convolution]
buf32 = extern_kernels.convolution(buf31, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 512, 8, 8), (32768, 64, 8, 1))
buf33 = empty_strided_cuda((16, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [unpool1], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_18.run(buf33, 16, grid=grid(16), stream=stream0)
buf34 = empty_strided_cuda((4, 1024, 16, 16), (262144, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [unpool1_, unpool1], Original ATen: [aten.cat, aten._unsafe_index]
triton_poi_fused__unsafe_index_cat_19.run(buf33, buf32, primals_19, buf18, buf34, 1048576, grid=grid(1048576), stream=stream0)
# Topologically Sorted Source Nodes: [deconv1], Original ATen: [aten.convolution]
buf35 = extern_kernels.convolution(buf34, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 256, 16, 16), (65536, 256, 16, 1))
buf36 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [unpool2], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_20.run(buf36, 32, grid=grid(32), stream=stream0)
buf37 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [unpool2_, unpool2], Original ATen: [aten.cat, aten._unsafe_index]
triton_poi_fused__unsafe_index_cat_21.run(buf36, buf35, primals_21, buf14, buf37, 2097152, grid=grid(2097152), stream=stream0)
# Topologically Sorted Source Nodes: [deconv2], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf37, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf39 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [unpool3], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_22.run(buf39, 64, grid=grid(64), stream=stream0)
buf40 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [unpool3_, unpool3], Original ATen: [aten.cat, aten._unsafe_index]
triton_poi_fused__unsafe_index_cat_23.run(buf39, buf38, primals_23, buf10, buf40, 4194304, grid=grid(4194304), stream=stream0)
# Topologically Sorted Source Nodes: [deconv3], Original ATen: [aten.convolution]
buf41 = extern_kernels.convolution(buf40, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf41, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf42 = empty_strided_cuda((128, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [unpool4], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_24.run(buf42, 128, grid=grid(128), stream=stream0)
buf43 = empty_strided_cuda((4, 128, 128, 128), (2097152, 16384, 128, 1), torch.float32)
# Topologically Sorted Source Nodes: [unpool4_, unpool4], Original ATen: [aten.cat, aten._unsafe_index]
triton_poi_fused__unsafe_index_cat_25.run(buf42, buf41, primals_25, buf6, buf43, 8388608, grid=grid(8388608), stream=stream0)
# Topologically Sorted Source Nodes: [deconv4], Original ATen: [aten.convolution]
buf44 = extern_kernels.convolution(buf43, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 32, 128, 128), (524288, 16384, 128, 1))
buf45 = empty_strided_cuda((256, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [unpool5], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_26.run(buf45, 256, grid=grid(256), stream=stream0)
buf46 = empty_strided_cuda((4, 64, 256, 256), (4194304, 65536, 256, 1), torch.float32)
# Topologically Sorted Source Nodes: [unpool5_, unpool5], Original ATen: [aten.cat, aten._unsafe_index]
triton_poi_fused__unsafe_index_cat_27.run(buf45, buf44, primals_27, buf2, buf46, 16777216, grid=grid(16777216), stream=stream0)
# Topologically Sorted Source Nodes: [deconv5], Original ATen: [aten.convolution]
buf47 = extern_kernels.convolution(buf46, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf47, (4, 3, 256, 256), (196608, 65536, 256, 1))
buf48 = buf47; del buf47 # reuse
# Topologically Sorted Source Nodes: [deconv5, deconv6_sf], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_28.run(buf48, primals_29, 786432, grid=grid(786432), stream=stream0)
del primals_29
# Topologically Sorted Source Nodes: [deconv00_c], Original ATen: [aten.convolution]
buf49 = extern_kernels.convolution(buf28, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf49, (4, 1024, 4, 4), (16384, 16, 4, 1))
buf50 = empty_strided_cuda((4, 3072, 8, 8), (196608, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [unpool0_c, unpool0_c_1], Original ATen: [aten.cat, aten._unsafe_index]
triton_poi_fused__unsafe_index_cat_29.run(buf30, buf49, primals_31, buf29, primals_17, buf22, buf50, 786432, grid=grid(786432), stream=stream0)
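        # Kernels 29-34 implement the `_c` decoder: they reuse the index tables
        # (buf30, buf33, ...) built for the first decoder and gather three
        # sources per output -- the new `_c` activation, the recomputed
        # first-decoder activation (e.g. relu(buf29 + primals_17)), and the
        # encoder skip -- matching the 3x-channel concats in forward.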
# Topologically Sorted Source Nodes: [deconv0_c], Original ATen: [aten.convolution]
buf51 = extern_kernels.convolution(buf50, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 512, 8, 8), (32768, 64, 8, 1))
buf52 = empty_strided_cuda((4, 1536, 16, 16), (393216, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [unpool1_c, unpool1_c_1], Original ATen: [aten.cat, aten._unsafe_index]
triton_poi_fused__unsafe_index_cat_30.run(buf33, buf51, primals_33, buf32, primals_19, buf18, buf52, 1572864, grid=grid(1572864), stream=stream0)
# Topologically Sorted Source Nodes: [deconv1_c], Original ATen: [aten.convolution]
buf53 = extern_kernels.convolution(buf52, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf53, (4, 256, 16, 16), (65536, 256, 16, 1))
buf54 = empty_strided_cuda((4, 768, 32, 32), (786432, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [unpool2_c, unpool2_c_1], Original ATen: [aten.cat, aten._unsafe_index]
triton_poi_fused__unsafe_index_cat_31.run(buf36, buf53, primals_35, buf35, primals_21, buf14, buf54, 3145728, grid=grid(3145728), stream=stream0)
# Topologically Sorted Source Nodes: [deconv2_c], Original ATen: [aten.convolution]
buf55 = extern_kernels.convolution(buf54, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf55, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf56 = empty_strided_cuda((4, 384, 64, 64), (1572864, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [unpool3_c, unpool3_c_1], Original ATen: [aten.cat, aten._unsafe_index]
triton_poi_fused__unsafe_index_cat_32.run(buf39, buf55, primals_37, buf38, primals_23, buf10, buf56, 6291456, grid=grid(6291456), stream=stream0)
# Topologically Sorted Source Nodes: [deconv3_c], Original ATen: [aten.convolution]
buf57 = extern_kernels.convolution(buf56, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf57, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf58 = empty_strided_cuda((4, 192, 128, 128), (3145728, 16384, 128, 1), torch.float32)
# Topologically Sorted Source Nodes: [unpool4_c, unpool4_c_1], Original ATen: [aten.cat, aten._unsafe_index]
triton_poi_fused__unsafe_index_cat_33.run(buf42, buf57, primals_39, buf41, primals_25, buf6, buf58, 12582912, grid=grid(12582912), stream=stream0)
# Topologically Sorted Source Nodes: [deconv4_c], Original ATen: [aten.convolution]
buf59 = extern_kernels.convolution(buf58, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf59, (4, 32, 128, 128), (524288, 16384, 128, 1))
buf60 = empty_strided_cuda((4, 96, 256, 256), (6291456, 65536, 256, 1), torch.float32)
# Topologically Sorted Source Nodes: [unpool5_c, unpool5_c_1], Original ATen: [aten.cat, aten._unsafe_index]
triton_poi_fused__unsafe_index_cat_34.run(buf45, buf59, primals_41, buf44, primals_27, buf2, buf60, 25165824, grid=grid(25165824), stream=stream0)
# Topologically Sorted Source Nodes: [deconv5_c], Original ATen: [aten.convolution]
buf61 = extern_kernels.convolution(buf60, primals_42, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf61, (4, 16, 256, 256), (1048576, 65536, 256, 1))
buf62 = buf61; del buf61 # reuse
# Topologically Sorted Source Nodes: [deconv5_c, deconv6_sf_c], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_35.run(buf62, primals_43, 4194304, grid=grid(4194304), stream=stream0)
del primals_43
buf64 = empty_strided_cuda((1, 1024), (1024, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf63, (1, 32768), (0, 1), 0), reinterpret_tensor(primals_44, (32768, 1024), (1, 32768), 0), out=buf64)
buf65 = buf64; del buf64 # reuse
# Topologically Sorted Source Nodes: [ref1_relu], Original ATen: [aten.relu]
triton_poi_fused_relu_36.run(buf65, primals_45, 1024, grid=grid(1024), stream=stream0)
del primals_45
buf66 = empty_strided_cuda((1, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf65, reinterpret_tensor(primals_46, (1024, 256), (1, 1024), 0), out=buf66)
buf67 = buf66; del buf66 # reuse
# Topologically Sorted Source Nodes: [ref2_relu], Original ATen: [aten.relu]
triton_poi_fused_relu_37.run(buf67, primals_47, 256, grid=grid(256), stream=stream0)
del primals_47
buf68 = empty_strided_cuda((1, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf67, reinterpret_tensor(primals_48, (256, 64), (1, 256), 0), out=buf68)
buf69 = buf68; del buf68 # reuse
# Topologically Sorted Source Nodes: [ref3_relu], Original ATen: [aten.relu]
triton_poi_fused_relu_38.run(buf69, primals_49, 64, grid=grid(64), stream=stream0)
del primals_49
buf70 = empty_strided_cuda((1, 11), (11, 1), torch.float32)
# Topologically Sorted Source Nodes: [ref4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_51, buf69, reinterpret_tensor(primals_50, (64, 11), (1, 64), 0), alpha=1, beta=1, out=buf70)
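        # ref1-ref3 lower to cuBLAS mm calls followed by tiny fused bias+ReLU
        # kernels (relu_36/37/38); only ref4 keeps its bias inside the addmm.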
del primals_51
buf71 = empty_strided_cuda((4, 32, 128, 128), (524288, 16384, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [deconv4_c, deconv4_relu_c], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_39.run(buf59, primals_41, buf71, 2097152, grid=grid(2097152), stream=stream0)
del buf59
del primals_41
buf72 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [deconv3_c, deconv3_relu_c], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_40.run(buf57, primals_39, buf72, 1048576, grid=grid(1048576), stream=stream0)
del buf57
del primals_39
buf73 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [deconv2_c, deconv2_relu_c], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_41.run(buf55, primals_37, buf73, 524288, grid=grid(524288), stream=stream0)
del buf55
del primals_37
buf74 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [deconv1_c, deconv1_relu_c], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_42.run(buf53, primals_35, buf74, 262144, grid=grid(262144), stream=stream0)
del buf53
del primals_35
buf75 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [deconv0_c, deconv0_relu_c], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_43.run(buf51, primals_33, buf75, 131072, grid=grid(131072), stream=stream0)
del buf51
del primals_33
buf76 = empty_strided_cuda((4, 1024, 4, 4), (16384, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [deconv00_c, deconv00_relu_c], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_44.run(buf49, primals_31, buf76, 65536, grid=grid(65536), stream=stream0)
del buf49
del primals_31
buf77 = empty_strided_cuda((4, 32, 128, 128), (524288, 16384, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [deconv4, deconv4_relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_39.run(buf44, primals_27, buf77, 2097152, grid=grid(2097152), stream=stream0)
del buf44
del primals_27
buf78 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [deconv3, deconv3_relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_40.run(buf41, primals_25, buf78, 1048576, grid=grid(1048576), stream=stream0)
del buf41
del primals_25
buf79 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [deconv2, deconv2_relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_41.run(buf38, primals_23, buf79, 524288, grid=grid(524288), stream=stream0)
del buf38
del primals_23
buf80 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [deconv1, deconv1_relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_42.run(buf35, primals_21, buf80, 262144, grid=grid(262144), stream=stream0)
del buf35
del primals_21
buf81 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [deconv0, deconv0_relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_43.run(buf32, primals_19, buf81, 131072, grid=grid(131072), stream=stream0)
del buf32
del primals_19
buf82 = empty_strided_cuda((4, 1024, 4, 4), (16384, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [deconv00, deconv00_relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_44.run(buf29, primals_17, buf82, 65536, grid=grid(65536), stream=stream0)
del buf29
del primals_17
return (buf48, buf62, buf70, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf14, buf15, buf17, buf18, buf19, buf21, buf22, buf23, buf25, buf26, buf27, buf28, buf30, buf31, buf33, buf34, buf36, buf37, buf39, buf40, buf42, buf43, buf45, buf46, buf48, buf50, buf52, buf54, buf56, buf58, buf60, buf62, reinterpret_tensor(buf63, (1, 32768), (32768, 1), 0), buf65, buf67, buf69, primals_50, primals_48, primals_46, primals_44, buf71, buf72, buf73, buf74, buf75, buf76, buf77, buf78, buf79, buf80, buf81, buf82, )
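# Hypothetical usage sketch (illustrative only, not emitted by Inductor): the
# first three results of `call` are LayoutNet.forward's return values
# (deconv6_sf, deconv6_sf_c, ref4); everything after them is saved for the
# backward pass.
#
#   edge_map, corner_map, layout_logits = call(list_of_51_params)[:3]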
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 256, 256), (196608, 65536, 256, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((1024, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((2048, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((1024, 2048, 3, 3), (18432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((512, 2048, 3, 3), (18432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((256, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((128, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((64, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((32, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((3, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((1024, 2048, 3, 3), (18432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((512, 3072, 3, 3), (27648, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((256, 1536, 3, 3), (13824, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((128, 768, 3, 3), (6912, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((64, 384, 3, 3), (3456, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((32, 192, 3, 3), (1728, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((16, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((1024, 32768), (32768, 1), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((256, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((64, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((11, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_51 = rand_strided((11, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class LayoutNet(nn.Module):
def __init__(self):
super(LayoutNet, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1, stride=1)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1, stride=1)
self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1, stride=1)
self.conv4 = nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=1)
self.conv5 = nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=1)
self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=1, stride=1)
self.conv7 = nn.Conv2d(1024, 2048, kernel_size=3, padding=1, stride=1)
self.deconv00 = nn.Conv2d(2048, 1024, kernel_size=3, padding=1,
stride=1)
self.deconv0 = nn.Conv2d(1024 * 2, 512, kernel_size=3, padding=1,
stride=1)
self.deconv1 = nn.Conv2d(512 * 2, 256, kernel_size=3, padding=1,
stride=1)
self.deconv2 = nn.Conv2d(256 * 2, 128, kernel_size=3, padding=1,
stride=1)
self.deconv3 = nn.Conv2d(128 * 2, 64, kernel_size=3, padding=1,
stride=1)
        self.deconv4 = nn.Conv2d(64 * 2, 32, kernel_size=3, padding=1, stride=1)
self.deconv5 = nn.Conv2d(32 * 2, 3, kernel_size=3, padding=1, stride=1)
self.deconv6_sf = nn.Sigmoid()
self.deconv00_c = nn.Conv2d(2048, 1024, kernel_size=3, padding=1,
stride=1)
self.deconv0_c = nn.Conv2d(1024 * 3, 512, kernel_size=3, padding=1,
stride=1)
self.deconv1_c = nn.Conv2d(512 * 3, 256, kernel_size=3, padding=1,
stride=1)
self.deconv2_c = nn.Conv2d(256 * 3, 128, kernel_size=3, padding=1,
stride=1)
self.deconv3_c = nn.Conv2d(128 * 3, 64, kernel_size=3, padding=1,
stride=1)
self.deconv4_c = nn.Conv2d(64 * 3, 32, kernel_size=3, padding=1,
stride=1)
self.deconv5_c = nn.Conv2d(32 * 3, 16, kernel_size=3, padding=1,
stride=1)
self.deconv6_sf_c = nn.Sigmoid()
self.ref1 = nn.Linear(2048 * 4 * 4, 1024)
self.ref2 = nn.Linear(1024, 256)
self.ref3 = nn.Linear(256, 64)
self.ref4 = nn.Linear(64, 11)
self.relu = nn.ReLU(inplace=True)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
def forward(self, x):
conv1 = self.conv1(x)
conv1_relu = self.relu(conv1)
pool1 = self.pool(conv1_relu)
conv2 = self.conv2(pool1)
conv2_relu = self.relu(conv2)
pool2 = self.pool(conv2_relu)
conv3 = self.conv3(pool2)
conv3_relu = self.relu(conv3)
pool3 = self.pool(conv3_relu)
conv4 = self.conv4(pool3)
conv4_relu = self.relu(conv4)
pool4 = self.pool(conv4_relu)
conv5 = self.conv5(pool4)
conv5_relu = self.relu(conv5)
pool5 = self.pool(conv5_relu)
conv6 = self.conv6(pool5)
conv6_relu = self.relu(conv6)
pool6 = self.pool(conv6_relu)
conv7 = self.conv7(pool6)
conv7_relu = self.relu(conv7)
pool7 = self.pool(conv7_relu)
unpool00 = F.interpolate(pool7, scale_factor=2)
deconv00 = self.deconv00(unpool00)
deconv00_relu = self.relu(deconv00)
unpool0_ = torch.cat((deconv00_relu, pool6), dim=1)
unpool0 = F.interpolate(unpool0_, scale_factor=2)
deconv0 = self.deconv0(unpool0)
deconv0_relu = self.relu(deconv0)
unpool1_ = torch.cat((deconv0_relu, pool5), dim=1)
unpool1 = F.interpolate(unpool1_, scale_factor=2)
deconv1 = self.deconv1(unpool1)
deconv1_relu = self.relu(deconv1)
unpool2_ = torch.cat((deconv1_relu, pool4), dim=1)
unpool2 = F.interpolate(unpool2_, scale_factor=2)
deconv2 = self.deconv2(unpool2)
deconv2_relu = self.relu(deconv2)
unpool3_ = torch.cat((deconv2_relu, pool3), dim=1)
unpool3 = F.interpolate(unpool3_, scale_factor=2)
deconv3 = self.deconv3(unpool3)
deconv3_relu = self.relu(deconv3)
unpool4_ = torch.cat((deconv3_relu, pool2), dim=1)
unpool4 = F.interpolate(unpool4_, scale_factor=2)
deconv4 = self.deconv4(unpool4)
deconv4_relu = self.relu(deconv4)
unpool5_ = torch.cat((deconv4_relu, pool1), dim=1)
unpool5 = F.interpolate(unpool5_, scale_factor=2)
deconv5 = self.deconv5(unpool5)
deconv6_sf = self.deconv6_sf(deconv5)
deconv00_c = self.deconv00_c(unpool00)
deconv00_relu_c = self.relu(deconv00_c)
unpool0_c = torch.cat((deconv00_relu_c, unpool0_), dim=1)
unpool0_c = F.interpolate(unpool0_c, scale_factor=2)
deconv0_c = self.deconv0_c(unpool0_c)
deconv0_relu_c = self.relu(deconv0_c)
unpool1_c = torch.cat((deconv0_relu_c, unpool1_), dim=1)
unpool1_c = F.interpolate(unpool1_c, scale_factor=2)
deconv1_c = self.deconv1_c(unpool1_c)
deconv1_relu_c = self.relu(deconv1_c)
unpool2_c = torch.cat((deconv1_relu_c, unpool2_), dim=1)
unpool2_c = F.interpolate(unpool2_c, scale_factor=2)
deconv2_c = self.deconv2_c(unpool2_c)
deconv2_relu_c = self.relu(deconv2_c)
unpool3_c = torch.cat((deconv2_relu_c, unpool3_), dim=1)
unpool3_c = F.interpolate(unpool3_c, scale_factor=2)
deconv3_c = self.deconv3_c(unpool3_c)
deconv3_relu_c = self.relu(deconv3_c)
unpool4_c = torch.cat((deconv3_relu_c, unpool4_), dim=1)
unpool4_c = F.interpolate(unpool4_c, scale_factor=2)
deconv4_c = self.deconv4_c(unpool4_c)
deconv4_relu_c = self.relu(deconv4_c)
unpool5_c = torch.cat((deconv4_relu_c, unpool5_), dim=1)
unpool5_c = F.interpolate(unpool5_c, scale_factor=2)
deconv5_c = self.deconv5_c(unpool5_c)
deconv6_sf_c = self.deconv6_sf_c(deconv5_c)
ref0 = pool7.view(-1, 2048 * 4 * 4)
ref1 = self.ref1(ref0)
ref1_relu = self.relu(ref1)
ref2 = self.ref2(ref1_relu)
ref2_relu = self.relu(ref2)
ref3 = self.ref3(ref2_relu)
ref3_relu = self.relu(ref3)
ref4 = self.ref4(ref3_relu)
return deconv6_sf, deconv6_sf_c, ref4
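def _layoutnet_smoke_test():
    # Minimal CPU usage sketch (assumption: illustrative only). Note that
    # forward flattens pool7 with view(-1, 2048 * 4 * 4): for the
    # (4, 3, 256, 256) input below pool7 is (4, 2048, 2, 2), so the whole
    # batch collapses into one (1, 32768) row and ref4 comes out as (1, 11).
    net = LayoutNet().eval()
    with torch.no_grad():
        edges, corners, layout_logits = net(torch.rand(4, 3, 256, 256))
    assert edges.shape == (4, 3, 256, 256)
    assert corners.shape == (4, 16, 256, 256)
    assert layout_logits.shape == (1, 11)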
def get_inputs():
return [torch.rand([4, 3, 256, 256])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 65536 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
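# Index-arithmetic sanity check for the kernel above (assumption: illustrative
# only). For a contiguous (4, 32, 256, 256) tensor the flat offset is
# ((n * 32 + c) * 256 + h) * 256 + w with H * W = 65536, so
# `xindex // 65536 % 32` recovers the channel c whose bias is broadcast.
_flat = ((3 * 32 + 17) * 256 + 200) * 256 + 5
assert _flat // 65536 % 32 == 17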
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 512 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 512 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (256 + 2 * x0 + 512 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (257 + 2 * x0 + 512 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
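def _maxpool2x2_with_codes(x):
    # Eager counterpart of the kernel above (assumption: illustrative only):
    # a 2x2/stride-2 max pool that also records which of the four window
    # positions won, encoded 0..3 like the int8 codes stored to out_ptr1
    # (tie-breaking on equal values may differ from the kernel's strict `>`).
    import torch.nn.functional as F
    vals, idx = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)
    w = x.shape[-1]
    codes = (idx // w % 2) * 2 + idx % 2  # within-window row offset * 2 + col
    return vals, codes.to(torch.int8)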
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16384 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 256 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 256 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (128 + 2 * x0 + 256 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (129 + 2 * x0 + 256 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 1024
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 2048
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), None, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), None, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), None, eviction_policy=
'evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_14(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
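# The kernel above materializes the nearest-neighbor source-index table used
# by F.interpolate(..., scale_factor=2): output index i reads input index
# int(i * 0.5). Equivalent sketch (assumption: illustrative only):
_nn_src = (torch.arange(4, dtype=torch.float32) * 0.5).to(torch.int64)
assert _nn_src.tolist() == [0, 0, 1, 1]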
@triton.jit
def triton_poi_fused__unsafe_index_max_pool2d_with_indices_15(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 2, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (2 * tmp8 + 8 * tmp4 + 16 * x2), None,
eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + 2 * tmp8 + 8 * tmp4 + 16 * x2), None,
eviction_policy='evict_last')
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tl.load(in_ptr1 + (4 + 2 * tmp8 + 8 * tmp4 + 16 * x2), None,
eviction_policy='evict_last')
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tl.load(in_ptr1 + (5 + 2 * tmp8 + 8 * tmp4 + 16 * x2), None,
eviction_policy='evict_last')
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x4, tmp15, None)
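def _fused_pool_then_upsample(x):
    # The kernel above fuses pool7 with its 2x nearest-neighbor upsample:
    # each upsampled pixel gathers its 2x2 window straight from the pre-pool
    # tensor and takes the max, instead of re-reading a stored pooled map.
    # Eager equivalent (assumption: illustrative only):
    import torch.nn.functional as F
    return F.interpolate(F.max_pool2d(x, kernel_size=2, stride=2),
        scale_factor=2)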
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_16(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_cat_17(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64 % 2048
x3 = xindex // 131072
x5 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tl.full([1], 0, tl.int64)
tmp12 = tl.full([1], 1024, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + 4 * tmp4 + 16 * x2 + 16384 * x3),
tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tl.full([1], 2048, tl.int64)
tmp24 = tl.load(in_ptr3 + (tmp8 + 4 * tmp4 + 16 * (-1024 + x2) + 16384 *
x3), tmp21, eviction_policy='evict_last', other=0.0)
tmp25 = tl.where(tmp13, tmp20, tmp24)
tl.store(out_ptr0 + x5, tmp25, None)
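def _fused_relu_cat_upsample(conv_out, bias, skip):
    # Eager counterpart of the kernel above (assumption: illustrative only):
    # it fuses the deconv00 bias-add + ReLU, the channel concat with the
    # pool6 skip (rows with x2 >= 1024 read from `skip`), and the 2x
    # nearest-neighbor upsample into a single pass.
    import torch.nn.functional as F
    cat = torch.cat((torch.relu(conv_out + bias.view(1, -1, 1, 1)), skip),
        dim=1)
    return F.interpolate(cat, scale_factor=2)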
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_18(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_cat_19(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 16
x0 = xindex % 16
x2 = xindex // 256 % 1024
x3 = xindex // 262144
x5 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tl.full([1], 0, tl.int64)
tmp12 = tl.full([1], 512, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + 8 * tmp4 + 64 * x2 + 32768 * x3),
tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tl.full([1], 1024, tl.int64)
tmp24 = tl.load(in_ptr3 + (tmp8 + 8 * tmp4 + 64 * (-512 + x2) + 32768 *
x3), tmp21, eviction_policy='evict_last', other=0.0)
tmp25 = tl.where(tmp13, tmp20, tmp24)
tl.store(out_ptr0 + x5, tmp25, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_20(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_cat_21(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 32 % 32
x0 = xindex % 32
x2 = xindex // 1024 % 512
x3 = xindex // 524288
x5 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tl.full([1], 0, tl.int64)
tmp12 = tl.full([1], 256, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + 16 * tmp4 + 256 * x2 + 65536 * x3),
tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tl.full([1], 512, tl.int64)
tmp24 = tl.load(in_ptr3 + (tmp8 + 16 * tmp4 + 256 * (-256 + x2) + 65536 *
x3), tmp21, eviction_policy='evict_last', other=0.0)
tmp25 = tl.where(tmp13, tmp20, tmp24)
tl.store(out_ptr0 + x5, tmp25, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_22(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_cat_23(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x2 = xindex // 4096 % 256
x3 = xindex // 1048576
x5 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tl.full([1], 0, tl.int64)
tmp12 = tl.full([1], 128, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + 32 * tmp4 + 1024 * x2 + 131072 * x3),
tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tl.full([1], 256, tl.int64)
tmp24 = tl.load(in_ptr3 + (tmp8 + 32 * tmp4 + 1024 * (-128 + x2) +
131072 * x3), tmp21, eviction_policy='evict_last', other=0.0)
tmp25 = tl.where(tmp13, tmp20, tmp24)
tl.store(out_ptr0 + x5, tmp25, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_24(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_cat_25(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 128 % 128
x0 = xindex % 128
x2 = xindex // 16384 % 128
x3 = xindex // 2097152
x5 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 64, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tl.full([1], 0, tl.int64)
tmp12 = tl.full([1], 64, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + 64 * tmp4 + 4096 * x2 + 262144 * x3),
tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tl.full([1], 128, tl.int64)
tmp24 = tl.load(in_ptr3 + (tmp8 + 64 * tmp4 + 4096 * (-64 + x2) +
262144 * x3), tmp21, eviction_policy='evict_last', other=0.0)
tmp25 = tl.where(tmp13, tmp20, tmp24)
tl.store(out_ptr0 + x5, tmp25, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_26(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_cat_27(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 256 % 256
x0 = xindex % 256
x2 = xindex // 65536 % 64
x3 = xindex // 4194304
x5 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 128, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tl.full([1], 0, tl.int64)
tmp12 = tl.full([1], 32, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + 128 * tmp4 + 16384 * x2 + 524288 * x3
), tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tl.full([1], 64, tl.int64)
tmp24 = tl.load(in_ptr3 + (tmp8 + 128 * tmp4 + 16384 * (-32 + x2) +
524288 * x3), tmp21, eviction_policy='evict_last', other=0.0)
tmp25 = tl.where(tmp13, tmp20, tmp24)
tl.store(out_ptr0 + x5, tmp25, None)
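# Fused bias add + sigmoid, applied in place to the 3-channel decoder
# output (x1 is the channel index; 65536 = 256*256 pixels per channel).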
@triton.jit
def triton_poi_fused_convolution_sigmoid_28(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 65536 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, None)
@triton.jit
def triton_poi_fused__unsafe_index_cat_29(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64 % 3072
x3 = xindex // 196608
x5 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tl.full([1], 0, tl.int64)
tmp12 = tl.full([1], 1024, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + 4 * tmp4 + 16 * x2 + 16384 * x3),
tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tl.full([1], 3072, tl.int64)
tmp24 = -1024 + x2
tmp26 = tmp24 < tmp12
tmp27 = tmp26 & tmp21
tmp28 = tl.load(in_ptr3 + (tmp8 + 4 * tmp4 + 16 * (-1024 + x2) + 16384 *
x3), tmp27, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr4 + (-1024 + x2), tmp27, eviction_policy=
'evict_last', other=0.0)
tmp30 = tmp28 + tmp29
tmp31 = triton_helpers.maximum(tmp17, tmp30)
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp27, tmp31, tmp32)
tmp34 = tmp24 >= tmp12
tl.full([1], 2048, tl.int64)
tmp37 = tmp34 & tmp21
tmp38 = tl.load(in_ptr5 + (tmp8 + 4 * tmp4 + 16 * (-1024 + (-1024 + x2)
) + 16384 * x3), tmp37, eviction_policy='evict_last', other=0.0)
tmp39 = tl.where(tmp26, tmp33, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp21, tmp39, tmp40)
tmp42 = tl.where(tmp13, tmp20, tmp41)
tl.store(out_ptr0 + x5, tmp42, None)
@triton.jit
def triton_poi_fused__unsafe_index_cat_30(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 16
x0 = xindex % 16
x2 = xindex // 256 % 1536
x3 = xindex // 393216
x5 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tl.full([1], 0, tl.int64)
tmp12 = tl.full([1], 512, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + 8 * tmp4 + 64 * x2 + 32768 * x3),
tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tl.full([1], 1536, tl.int64)
tmp24 = -512 + x2
tmp26 = tmp24 < tmp12
tmp27 = tmp26 & tmp21
tmp28 = tl.load(in_ptr3 + (tmp8 + 8 * tmp4 + 64 * (-512 + x2) + 32768 *
x3), tmp27, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr4 + (-512 + x2), tmp27, eviction_policy=
'evict_last', other=0.0)
tmp30 = tmp28 + tmp29
tmp31 = triton_helpers.maximum(tmp17, tmp30)
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp27, tmp31, tmp32)
tmp34 = tmp24 >= tmp12
tl.full([1], 1024, tl.int64)
tmp37 = tmp34 & tmp21
tmp38 = tl.load(in_ptr5 + (tmp8 + 8 * tmp4 + 64 * (-512 + (-512 + x2)) +
32768 * x3), tmp37, eviction_policy='evict_last', other=0.0)
tmp39 = tl.where(tmp26, tmp33, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp21, tmp39, tmp40)
tmp42 = tl.where(tmp13, tmp20, tmp41)
tl.store(out_ptr0 + x5, tmp42, None)
@triton.jit
def triton_poi_fused__unsafe_index_cat_31(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 32 % 32
x0 = xindex % 32
x2 = xindex // 1024 % 768
x3 = xindex // 786432
x5 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tl.full([1], 0, tl.int64)
tmp12 = tl.full([1], 256, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + 16 * tmp4 + 256 * x2 + 65536 * x3),
tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tl.full([1], 768, tl.int64)
tmp24 = -256 + x2
tmp26 = tmp24 < tmp12
tmp27 = tmp26 & tmp21
tmp28 = tl.load(in_ptr3 + (tmp8 + 16 * tmp4 + 256 * (-256 + x2) + 65536 *
x3), tmp27, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr4 + (-256 + x2), tmp27, eviction_policy=
'evict_last', other=0.0)
tmp30 = tmp28 + tmp29
tmp31 = triton_helpers.maximum(tmp17, tmp30)
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp27, tmp31, tmp32)
tmp34 = tmp24 >= tmp12
tl.full([1], 512, tl.int64)
tmp37 = tmp34 & tmp21
tmp38 = tl.load(in_ptr5 + (tmp8 + 16 * tmp4 + 256 * (-256 + (-256 + x2)
) + 65536 * x3), tmp37, eviction_policy='evict_last', other=0.0)
tmp39 = tl.where(tmp26, tmp33, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp21, tmp39, tmp40)
tmp42 = tl.where(tmp13, tmp20, tmp41)
tl.store(out_ptr0 + x5, tmp42, None)
@triton.jit
def triton_poi_fused__unsafe_index_cat_32(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x2 = xindex // 4096 % 384
x3 = xindex // 1572864
x5 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tl.full([1], 0, tl.int64)
tmp12 = tl.full([1], 128, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + 32 * tmp4 + 1024 * x2 + 131072 * x3),
tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tl.full([1], 384, tl.int64)
tmp24 = -128 + x2
tmp26 = tmp24 < tmp12
tmp27 = tmp26 & tmp21
tmp28 = tl.load(in_ptr3 + (tmp8 + 32 * tmp4 + 1024 * (-128 + x2) +
131072 * x3), tmp27, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr4 + (-128 + x2), tmp27, eviction_policy=
'evict_last', other=0.0)
tmp30 = tmp28 + tmp29
tmp31 = triton_helpers.maximum(tmp17, tmp30)
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp27, tmp31, tmp32)
tmp34 = tmp24 >= tmp12
tl.full([1], 256, tl.int64)
tmp37 = tmp34 & tmp21
tmp38 = tl.load(in_ptr5 + (tmp8 + 32 * tmp4 + 1024 * (-128 + (-128 + x2
)) + 131072 * x3), tmp37, eviction_policy='evict_last', other=0.0)
tmp39 = tl.where(tmp26, tmp33, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp21, tmp39, tmp40)
tmp42 = tl.where(tmp13, tmp20, tmp41)
tl.store(out_ptr0 + x5, tmp42, None)
@triton.jit
def triton_poi_fused__unsafe_index_cat_33(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 128 % 128
x0 = xindex % 128
x2 = xindex // 16384 % 192
x3 = xindex // 3145728
x5 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 64, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tl.full([1], 0, tl.int64)
tmp12 = tl.full([1], 64, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + 64 * tmp4 + 4096 * x2 + 262144 * x3),
tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tl.full([1], 192, tl.int64)
tmp24 = -64 + x2
tmp26 = tmp24 < tmp12
tmp27 = tmp26 & tmp21
tmp28 = tl.load(in_ptr3 + (tmp8 + 64 * tmp4 + 4096 * (-64 + x2) +
262144 * x3), tmp27, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr4 + (-64 + x2), tmp27, eviction_policy=
'evict_last', other=0.0)
tmp30 = tmp28 + tmp29
tmp31 = triton_helpers.maximum(tmp17, tmp30)
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp27, tmp31, tmp32)
tmp34 = tmp24 >= tmp12
tl.full([1], 128, tl.int64)
tmp37 = tmp34 & tmp21
tmp38 = tl.load(in_ptr5 + (tmp8 + 64 * tmp4 + 4096 * (-64 + (-64 + x2)) +
262144 * x3), tmp37, eviction_policy='evict_last', other=0.0)
tmp39 = tl.where(tmp26, tmp33, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp21, tmp39, tmp40)
tmp42 = tl.where(tmp13, tmp20, tmp41)
tl.store(out_ptr0 + x5, tmp42, None)
@triton.jit
def triton_poi_fused__unsafe_index_cat_34(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 256 % 256
x0 = xindex % 256
x2 = xindex // 65536 % 96
x3 = xindex // 6291456
x5 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 128, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = x2
tl.full([1], 0, tl.int64)
tmp12 = tl.full([1], 32, tl.int64)
tmp13 = tmp9 < tmp12
tmp14 = tl.load(in_ptr1 + (tmp8 + 128 * tmp4 + 16384 * x2 + 524288 * x3
), tmp13, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr2 + x2, tmp13, eviction_policy='evict_last',
other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp13, tmp18, tmp19)
tmp21 = tmp9 >= tmp12
tl.full([1], 96, tl.int64)
tmp24 = -32 + x2
tmp26 = tmp24 < tmp12
tmp27 = tmp26 & tmp21
tmp28 = tl.load(in_ptr3 + (tmp8 + 128 * tmp4 + 16384 * (-32 + x2) +
524288 * x3), tmp27, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr4 + (-32 + x2), tmp27, eviction_policy=
'evict_last', other=0.0)
tmp30 = tmp28 + tmp29
tmp31 = triton_helpers.maximum(tmp17, tmp30)
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp27, tmp31, tmp32)
tmp34 = tmp24 >= tmp12
tl.full([1], 64, tl.int64)
tmp37 = tmp34 & tmp21
tmp38 = tl.load(in_ptr5 + (tmp8 + 128 * tmp4 + 16384 * (-32 + (-32 + x2
)) + 524288 * x3), tmp37, eviction_policy='evict_last', other=0.0)
tmp39 = tl.where(tmp26, tmp33, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp21, tmp39, tmp40)
tmp42 = tl.where(tmp13, tmp20, tmp41)
tl.store(out_ptr0 + x5, tmp42, None)
@triton.jit
def triton_poi_fused_convolution_sigmoid_35(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 65536 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, None)
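# relu_36/37/38 below fuse the bias add + ReLU for the 1024-, 256- and
# 64-unit linear layers of the classification head.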
@triton.jit
def triton_poi_fused_relu_36(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_37(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_38(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
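# The *_threshold_backward kernels recompute each ReLU output and store
# the boolean mask (activation <= 0) that autograd later uses for the
# in-place ReLU backward pass.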
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_39(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16384 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_40(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_41(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_42(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_43(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 512
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_44(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 1024
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51) = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 3, 256, 256), (196608, 65536, 256, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (512,), (1,))
assert_size_stride(primals_12, (1024, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_13, (1024,), (1,))
assert_size_stride(primals_14, (2048, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_15, (2048,), (1,))
assert_size_stride(primals_16, (1024, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_17, (1024,), (1,))
assert_size_stride(primals_18, (512, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_19, (512,), (1,))
assert_size_stride(primals_20, (256, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_21, (256,), (1,))
assert_size_stride(primals_22, (128, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (128,), (1,))
assert_size_stride(primals_24, (64, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_25, (64,), (1,))
assert_size_stride(primals_26, (32, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_27, (32,), (1,))
assert_size_stride(primals_28, (3, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_29, (3,), (1,))
assert_size_stride(primals_30, (1024, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_31, (1024,), (1,))
assert_size_stride(primals_32, (512, 3072, 3, 3), (27648, 9, 3, 1))
assert_size_stride(primals_33, (512,), (1,))
assert_size_stride(primals_34, (256, 1536, 3, 3), (13824, 9, 3, 1))
assert_size_stride(primals_35, (256,), (1,))
assert_size_stride(primals_36, (128, 768, 3, 3), (6912, 9, 3, 1))
assert_size_stride(primals_37, (128,), (1,))
assert_size_stride(primals_38, (64, 384, 3, 3), (3456, 9, 3, 1))
assert_size_stride(primals_39, (64,), (1,))
assert_size_stride(primals_40, (32, 192, 3, 3), (1728, 9, 3, 1))
assert_size_stride(primals_41, (32,), (1,))
assert_size_stride(primals_42, (16, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_43, (16,), (1,))
assert_size_stride(primals_44, (1024, 32768), (32768, 1))
assert_size_stride(primals_45, (1024,), (1,))
assert_size_stride(primals_46, (256, 1024), (1024, 1))
assert_size_stride(primals_47, (256,), (1,))
assert_size_stride(primals_48, (64, 256), (256, 1))
assert_size_stride(primals_49, (64,), (1,))
assert_size_stride(primals_50, (11, 64), (64, 1))
assert_size_stride(primals_51, (11,), (1,))
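    # Encoder: seven 3x3 conv + ReLU blocks, each followed by 2x2 max
    # pooling, reduce the (4, 3, 256, 256) input to a 2048-channel 4x4
    # feature map (buf25) and a 2x2-pooled bottleneck (buf63).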
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 256, 256), (2097152, 65536, 256, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(8388608)](buf1, primals_2,
8388608, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 32, 128, 128), (524288, 16384, 128, 1
), torch.float32)
buf3 = empty_strided_cuda((4, 32, 128, 128), (524288, 16384, 128, 1
), torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(2097152)](buf1,
buf2, buf3, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 128, 128), (1048576, 16384, 128, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(4194304)](buf5, primals_5,
4194304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(1048576)](buf5,
buf6, buf7, 1048576, XBLOCK=512, num_warps=8, num_stages=1)
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 64, 64), (524288, 4096, 64, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(2097152)](buf9, primals_7,
2097152, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1),
torch.float32)
buf11 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(524288)](buf9,
buf10, buf11, 524288, XBLOCK=512, num_warps=8, num_stages=1)
buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 32, 32), (262144, 1024, 32, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_6[grid(1048576)](buf13, primals_9,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf14 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1),
torch.float32)
buf15 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_7[grid(262144)](buf13,
buf14, buf15, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf16 = extern_kernels.convolution(buf14, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 512, 16, 16), (131072, 256, 16, 1))
buf17 = buf16
del buf16
triton_poi_fused_convolution_relu_8[grid(524288)](buf17, primals_11,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf18 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch
.float32)
buf19 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_9[grid(131072)](buf17,
buf18, buf19, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf20 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 1024, 8, 8), (65536, 64, 8, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_relu_10[grid(262144)](buf21,
primals_13, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf22 = empty_strided_cuda((4, 1024, 4, 4), (16384, 16, 4, 1),
torch.float32)
buf23 = empty_strided_cuda((4, 1024, 4, 4), (16384, 16, 4, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_11[grid(65536)](buf21,
buf22, buf23, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf24 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 2048, 4, 4), (32768, 16, 4, 1))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_12[grid(131072)](buf25,
primals_15, 131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_15
buf26 = empty_strided_cuda((4, 2048, 2, 2), (8192, 4, 2, 1), torch.int8
)
buf63 = empty_strided_cuda((4, 2048, 2, 2), (8192, 4, 2, 1), torch.
float32)
triton_poi_fused_max_pool2d_with_indices_13[grid(32768)](buf25,
buf26, buf63, 32768, XBLOCK=256, num_warps=4, num_stages=1)
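        # First decoder branch: each stage 2x nearest-upsamples the
        # running conv output and concatenates the matching encoder
        # activation (U-Net-style skip) before the next 3x3 convolution,
        # ending in the 3-channel sigmoid map buf48.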
buf27 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_14[grid(4)](buf27, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf28 = empty_strided_cuda((4, 2048, 4, 4), (32768, 16, 4, 1),
torch.float32)
triton_poi_fused__unsafe_index_max_pool2d_with_indices_15[grid(131072)
](buf27, buf25, buf28, 131072, XBLOCK=512, num_warps=8,
num_stages=1)
buf29 = extern_kernels.convolution(buf28, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf29, (4, 1024, 4, 4), (16384, 16, 4, 1))
buf30 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_16[grid(8)](buf30, 8,
XBLOCK=8, num_warps=1, num_stages=1)
buf31 = empty_strided_cuda((4, 2048, 8, 8), (131072, 64, 8, 1),
torch.float32)
triton_poi_fused__unsafe_index_cat_17[grid(524288)](buf30, buf29,
primals_17, buf22, buf31, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
buf32 = extern_kernels.convolution(buf31, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 512, 8, 8), (32768, 64, 8, 1))
buf33 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_18[grid(16)](buf33, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf34 = empty_strided_cuda((4, 1024, 16, 16), (262144, 256, 16, 1),
torch.float32)
triton_poi_fused__unsafe_index_cat_19[grid(1048576)](buf33, buf32,
primals_19, buf18, buf34, 1048576, XBLOCK=1024, num_warps=4,
num_stages=1)
buf35 = extern_kernels.convolution(buf34, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 256, 16, 16), (65536, 256, 16, 1))
buf36 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_20[grid(32)](buf36, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf37 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1),
torch.float32)
triton_poi_fused__unsafe_index_cat_21[grid(2097152)](buf36, buf35,
primals_21, buf14, buf37, 2097152, XBLOCK=1024, num_warps=4,
num_stages=1)
buf38 = extern_kernels.convolution(buf37, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf39 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_22[grid(64)](buf39, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf40 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1),
torch.float32)
triton_poi_fused__unsafe_index_cat_23[grid(4194304)](buf39, buf38,
primals_23, buf10, buf40, 4194304, XBLOCK=1024, num_warps=4,
num_stages=1)
buf41 = extern_kernels.convolution(buf40, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf41, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf42 = empty_strided_cuda((128,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_24[grid(128)](buf42, 128,
XBLOCK=128, num_warps=4, num_stages=1)
buf43 = empty_strided_cuda((4, 128, 128, 128), (2097152, 16384, 128,
1), torch.float32)
triton_poi_fused__unsafe_index_cat_25[grid(8388608)](buf42, buf41,
primals_25, buf6, buf43, 8388608, XBLOCK=1024, num_warps=4,
num_stages=1)
buf44 = extern_kernels.convolution(buf43, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 32, 128, 128), (524288, 16384, 128, 1))
buf45 = empty_strided_cuda((256,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_26[grid(256)](buf45, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf46 = empty_strided_cuda((4, 64, 256, 256), (4194304, 65536, 256,
1), torch.float32)
triton_poi_fused__unsafe_index_cat_27[grid(16777216)](buf45, buf44,
primals_27, buf2, buf46, 16777216, XBLOCK=1024, num_warps=4,
num_stages=1)
buf47 = extern_kernels.convolution(buf46, primals_28, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf47, (4, 3, 256, 256), (196608, 65536, 256, 1))
buf48 = buf47
del buf47
triton_poi_fused_convolution_sigmoid_28[grid(786432)](buf48,
primals_29, 786432, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_29
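        # Second decoder branch: mirrors the one above, but each stage
        # also concatenates the first branch's conv output, hence the
        # triple-width *_c convolutions (unsafe_index_cat_29..34) and the
        # 16-channel sigmoid map buf62.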
buf49 = extern_kernels.convolution(buf28, primals_30, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf49, (4, 1024, 4, 4), (16384, 16, 4, 1))
buf50 = empty_strided_cuda((4, 3072, 8, 8), (196608, 64, 8, 1),
torch.float32)
triton_poi_fused__unsafe_index_cat_29[grid(786432)](buf30, buf49,
primals_31, buf29, primals_17, buf22, buf50, 786432, XBLOCK=
1024, num_warps=4, num_stages=1)
buf51 = extern_kernels.convolution(buf50, primals_32, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 512, 8, 8), (32768, 64, 8, 1))
buf52 = empty_strided_cuda((4, 1536, 16, 16), (393216, 256, 16, 1),
torch.float32)
triton_poi_fused__unsafe_index_cat_30[grid(1572864)](buf33, buf51,
primals_33, buf32, primals_19, buf18, buf52, 1572864, XBLOCK=
1024, num_warps=4, num_stages=1)
buf53 = extern_kernels.convolution(buf52, primals_34, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf53, (4, 256, 16, 16), (65536, 256, 16, 1))
buf54 = empty_strided_cuda((4, 768, 32, 32), (786432, 1024, 32, 1),
torch.float32)
triton_poi_fused__unsafe_index_cat_31[grid(3145728)](buf36, buf53,
primals_35, buf35, primals_21, buf14, buf54, 3145728, XBLOCK=
1024, num_warps=4, num_stages=1)
buf55 = extern_kernels.convolution(buf54, primals_36, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf55, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf56 = empty_strided_cuda((4, 384, 64, 64), (1572864, 4096, 64, 1),
torch.float32)
triton_poi_fused__unsafe_index_cat_32[grid(6291456)](buf39, buf55,
primals_37, buf38, primals_23, buf10, buf56, 6291456, XBLOCK=
1024, num_warps=4, num_stages=1)
buf57 = extern_kernels.convolution(buf56, primals_38, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf57, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf58 = empty_strided_cuda((4, 192, 128, 128), (3145728, 16384, 128,
1), torch.float32)
triton_poi_fused__unsafe_index_cat_33[grid(12582912)](buf42, buf57,
primals_39, buf41, primals_25, buf6, buf58, 12582912, XBLOCK=
1024, num_warps=4, num_stages=1)
buf59 = extern_kernels.convolution(buf58, primals_40, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf59, (4, 32, 128, 128), (524288, 16384, 128, 1))
buf60 = empty_strided_cuda((4, 96, 256, 256), (6291456, 65536, 256,
1), torch.float32)
triton_poi_fused__unsafe_index_cat_34[grid(25165824)](buf45, buf59,
primals_41, buf44, primals_27, buf2, buf60, 25165824, XBLOCK=
1024, num_warps=4, num_stages=1)
buf61 = extern_kernels.convolution(buf60, primals_42, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf61, (4, 16, 256, 256), (1048576, 65536, 256, 1))
buf62 = buf61
del buf61
triton_poi_fused_convolution_sigmoid_35[grid(4194304)](buf62,
primals_43, 4194304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_43
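        # Classification head: the pooled bottleneck buf63, viewed as a
        # 32768-feature vector, passes through three fused linear + ReLU
        # layers and a final 11-way addmm into buf70.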
buf64 = empty_strided_cuda((1, 1024), (1024, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf63, (1, 32768), (0, 1), 0),
reinterpret_tensor(primals_44, (32768, 1024), (1, 32768), 0),
out=buf64)
buf65 = buf64
del buf64
triton_poi_fused_relu_36[grid(1024)](buf65, primals_45, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_45
buf66 = empty_strided_cuda((1, 256), (256, 1), torch.float32)
extern_kernels.mm(buf65, reinterpret_tensor(primals_46, (1024, 256),
(1, 1024), 0), out=buf66)
buf67 = buf66
del buf66
triton_poi_fused_relu_37[grid(256)](buf67, primals_47, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_47
buf68 = empty_strided_cuda((1, 64), (64, 1), torch.float32)
extern_kernels.mm(buf67, reinterpret_tensor(primals_48, (256, 64),
(1, 256), 0), out=buf68)
buf69 = buf68
del buf68
triton_poi_fused_relu_38[grid(64)](buf69, primals_49, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_49
buf70 = empty_strided_cuda((1, 11), (11, 1), torch.float32)
extern_kernels.addmm(primals_51, buf69, reinterpret_tensor(
primals_50, (64, 11), (1, 64), 0), alpha=1, beta=1, out=buf70)
del primals_51
buf71 = empty_strided_cuda((4, 32, 128, 128), (524288, 16384, 128,
1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_39[grid(2097152)](
buf59, primals_41, buf71, 2097152, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf59
del primals_41
buf72 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_40[grid(1048576)](
buf57, primals_39, buf72, 1048576, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf57
del primals_39
buf73 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_41[grid(524288)](
buf55, primals_37, buf73, 524288, XBLOCK=512, num_warps=8,
num_stages=1)
del buf55
del primals_37
buf74 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_42[grid(262144)](
buf53, primals_35, buf74, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf53
del primals_35
buf75 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_43[grid(131072)](
buf51, primals_33, buf75, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf51
del primals_33
buf76 = empty_strided_cuda((4, 1024, 4, 4), (16384, 16, 4, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_44[grid(65536)](
buf49, primals_31, buf76, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf49
del primals_31
buf77 = empty_strided_cuda((4, 32, 128, 128), (524288, 16384, 128,
1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_39[grid(2097152)](
buf44, primals_27, buf77, 2097152, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf44
del primals_27
buf78 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_40[grid(1048576)](
buf41, primals_25, buf78, 1048576, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf41
del primals_25
buf79 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_41[grid(524288)](
buf38, primals_23, buf79, 524288, XBLOCK=512, num_warps=8,
num_stages=1)
del buf38
del primals_23
buf80 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_42[grid(262144)](
buf35, primals_21, buf80, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf35
del primals_21
buf81 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_43[grid(131072)](
buf32, primals_19, buf81, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf32
del primals_19
buf82 = empty_strided_cuda((4, 1024, 4, 4), (16384, 16, 4, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_44[grid(65536)](
buf29, primals_17, buf82, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf29
del primals_17
return (buf48, buf62, buf70, primals_1, primals_3, primals_4, primals_6,
primals_8, primals_10, primals_12, primals_14, primals_16,
primals_18, primals_20, primals_22, primals_24, primals_26,
primals_28, primals_30, primals_32, primals_34, primals_36,
primals_38, primals_40, primals_42, buf1, buf2, buf3, buf5, buf6,
buf7, buf9, buf10, buf11, buf13, buf14, buf15, buf17, buf18, buf19,
buf21, buf22, buf23, buf25, buf26, buf27, buf28, buf30, buf31,
buf33, buf34, buf36, buf37, buf39, buf40, buf42, buf43, buf45,
buf46, buf48, buf50, buf52, buf54, buf56, buf58, buf60, buf62,
reinterpret_tensor(buf63, (1, 32768), (32768, 1), 0), buf65, buf67,
buf69, primals_50, primals_48, primals_46, primals_44, buf71, buf72,
buf73, buf74, buf75, buf76, buf77, buf78, buf79, buf80, buf81, buf82)
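# LayoutNetNew wraps the compiled graph: forward() gathers the module's
# parameters in the primals_* order that call() expects and returns its
# first three outputs -- the two sigmoid decoder maps and the 11-way
# classification logits.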
class LayoutNetNew(nn.Module):
def __init__(self):
super(LayoutNetNew, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1, stride=1)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1, stride=1)
self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1, stride=1)
self.conv4 = nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=1)
self.conv5 = nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=1)
self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=1, stride=1)
self.conv7 = nn.Conv2d(1024, 2048, kernel_size=3, padding=1, stride=1)
self.deconv00 = nn.Conv2d(2048, 1024, kernel_size=3, padding=1,
stride=1)
self.deconv0 = nn.Conv2d(1024 * 2, 512, kernel_size=3, padding=1,
stride=1)
self.deconv1 = nn.Conv2d(512 * 2, 256, kernel_size=3, padding=1,
stride=1)
self.deconv2 = nn.Conv2d(256 * 2, 128, kernel_size=3, padding=1,
stride=1)
self.deconv3 = nn.Conv2d(128 * 2, 64, kernel_size=3, padding=1,
stride=1)
self.deconv4 = nn.Conv2d(64 * 2, 32, kernel_size=3, padding=1, stride=1
)
self.deconv5 = nn.Conv2d(32 * 2, 3, kernel_size=3, padding=1, stride=1)
self.deconv6_sf = nn.Sigmoid()
self.deconv00_c = nn.Conv2d(2048, 1024, kernel_size=3, padding=1,
stride=1)
self.deconv0_c = nn.Conv2d(1024 * 3, 512, kernel_size=3, padding=1,
stride=1)
self.deconv1_c = nn.Conv2d(512 * 3, 256, kernel_size=3, padding=1,
stride=1)
self.deconv2_c = nn.Conv2d(256 * 3, 128, kernel_size=3, padding=1,
stride=1)
self.deconv3_c = nn.Conv2d(128 * 3, 64, kernel_size=3, padding=1,
stride=1)
self.deconv4_c = nn.Conv2d(64 * 3, 32, kernel_size=3, padding=1,
stride=1)
self.deconv5_c = nn.Conv2d(32 * 3, 16, kernel_size=3, padding=1,
stride=1)
self.deconv6_sf_c = nn.Sigmoid()
self.ref1 = nn.Linear(2048 * 4 * 4, 1024)
self.ref2 = nn.Linear(1024, 256)
self.ref3 = nn.Linear(256, 64)
self.ref4 = nn.Linear(64, 11)
self.relu = nn.ReLU(inplace=True)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.conv5.weight
primals_11 = self.conv5.bias
primals_12 = self.conv6.weight
primals_13 = self.conv6.bias
primals_14 = self.conv7.weight
primals_15 = self.conv7.bias
primals_16 = self.deconv00.weight
primals_17 = self.deconv00.bias
primals_18 = self.deconv0.weight
primals_19 = self.deconv0.bias
primals_20 = self.deconv1.weight
primals_21 = self.deconv1.bias
primals_22 = self.deconv2.weight
primals_23 = self.deconv2.bias
primals_24 = self.deconv3.weight
primals_25 = self.deconv3.bias
primals_26 = self.deconv4.weight
primals_27 = self.deconv4.bias
primals_28 = self.deconv5.weight
primals_29 = self.deconv5.bias
primals_30 = self.deconv00_c.weight
primals_31 = self.deconv00_c.bias
primals_32 = self.deconv0_c.weight
primals_33 = self.deconv0_c.bias
primals_34 = self.deconv1_c.weight
primals_35 = self.deconv1_c.bias
primals_36 = self.deconv2_c.weight
primals_37 = self.deconv2_c.bias
primals_38 = self.deconv3_c.weight
primals_39 = self.deconv3_c.bias
primals_40 = self.deconv4_c.weight
primals_41 = self.deconv4_c.bias
primals_42 = self.deconv5_c.weight
primals_43 = self.deconv5_c.bias
primals_44 = self.ref1.weight
primals_45 = self.ref1.bias
primals_46 = self.ref2.weight
primals_47 = self.ref2.bias
primals_48 = self.ref3.weight
primals_49 = self.ref3.bias
primals_50 = self.ref4.weight
primals_51 = self.ref4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51])
return output[0], output[1], output[2]
| wellowdata/pytorch-layoutnet | LayoutNet | false | 16,857 | ["MIT"] | 155 | 3d4352f94ed00d3c37890e9119452811d4f0893f | https://github.com/wellowdata/pytorch-layoutnet/tree/3d4352f94ed00d3c37890e9119452811d4f0893f |
ClassNetVideoConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/t2/ct2o6l3yyekgusbxgfioo6nglz7gryhjfbojbdjm3w3q777vkgix.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8388608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 262144) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 1024, 64, 64, 64), (268435456, 262144, 4096, 64, 1))
assert_size_stride(primals_2, (8, 1024, 1, 1, 1), (1024, 1, 1, 1, 1))
assert_size_stride(primals_3, (8, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 64, 64, 64), (2097152, 262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 8388608, grid=grid(8388608), stream=stream0)
del primals_3
return (buf1, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1024, 64, 64, 64), (268435456, 262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, 1024, 1, 1, 1), (1024, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class Unit3D(nn.Module):
"""Basic unit containing Conv3D + BatchNorm + non-linearity."""
def __init__(self, in_channels, output_channels, kernel_shape=(1, 1, 1),
stride=(1, 1, 1), padding=0, activation_fn=F.relu, use_batch_norm=
True, use_bias=False, name='unit_3d'):
"""Initializes Unit3D module."""
super(Unit3D, self).__init__()
self._output_channels = output_channels
self._kernel_shape = kernel_shape
self._stride = stride
self._use_batch_norm = use_batch_norm
self._activation_fn = activation_fn
self._use_bias = use_bias
self.name = name
self.padding = padding
self.conv3d = nn.Conv3d(in_channels=in_channels, out_channels=self.
_output_channels, kernel_size=self._kernel_shape, stride=self.
_stride, padding=0, bias=self._use_bias)
if self._use_batch_norm:
self.bn = nn.BatchNorm3d(self._output_channels, eps=0.001,
momentum=0.01)
def compute_pad(self, dim, s):
"""Get the zero padding number."""
if s % self._stride[dim] == 0:
return max(self._kernel_shape[dim] - self._stride[dim], 0)
else:
return max(self._kernel_shape[dim] - s % self._stride[dim], 0)
def forward(self, x):
"""
        Connects the module to its inputs; padding is computed dynamically from the input size at forward time.
Args:
x: Inputs to the Unit3D component.
Returns:
Outputs from the module.
"""
_batch, _channel, time, height, width = x.size()
pad_t = self.compute_pad(0, time)
pad_h = self.compute_pad(1, height)
pad_w = self.compute_pad(2, width)
pad_t_front = pad_t // 2
pad_t_back = pad_t - pad_t_front
pad_h_front = pad_h // 2
pad_h_back = pad_h - pad_h_front
pad_w_front = pad_w // 2
pad_w_back = pad_w - pad_w_front
pad = (pad_w_front, pad_w_back, pad_h_front, pad_h_back,
pad_t_front, pad_t_back)
x = F.pad(x, pad)
x = self.conv3d(x)
if self._use_batch_norm:
x = self.bn(x)
if self._activation_fn is not None:
x = self._activation_fn(x)
return x
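# Worked example for Unit3D.compute_pad above (a sketch, not part of the
# original source): with kernel_shape=(3, 3, 3) and stride=(2, 2, 2), an
# input length of 7 gives 7 % 2 = 1, so pad = max(3 - 1, 0) = 2, split
# 1 front / 1 back; a length of 8 gives 8 % 2 = 0, so pad =
# max(3 - 2, 0) = 1, split 0 front / 1 back. This reproduces
# TensorFlow-style 'SAME' padding, which is why conv3d is constructed
# with padding=0.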
class ClassNetVideoConv(nn.Module):
"""Classifier network for video input refer to MMSADA.
Args:
input_size (int, optional): the dimension of the final feature vector. Defaults to 1024.
n_class (int, optional): the number of classes. Defaults to 8.
References:
        Munro, Jonathan, and Dima Damen. "Multi-modal domain adaptation for fine-grained action recognition."
In CVPR, pp. 122-132. 2020.
"""
def __init__(self, input_size=1024, n_class=8):
super(ClassNetVideoConv, self).__init__()
self.dp = nn.Dropout()
self.logits = Unit3D(in_channels=input_size, output_channels=
n_class, kernel_shape=[1, 1, 1], padding=0, activation_fn=None,
use_batch_norm=False, use_bias=True)
def forward(self, input):
x = self.logits(self.dp(input))
return x
def get_inputs():
return [torch.rand([4, 1024, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
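# A minimal usage sketch (our addition, not in the original source); the
# input shape follows get_inputs() above:
#
#     model = ClassNetVideoConv(input_size=1024, n_class=8)
#     scores = model(torch.rand(4, 1024, 64, 64, 64))
#     # scores: (4, 8, 64, 64, 64) -- per-voxel class logits from the
#     # 1x1x1 Conv3d head; pool over the spatial dims for a clip-level
#     # prediction.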
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
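# The kernel below is the in-place bias add that Inductor split out of
# the convolution: x1 recovers the channel index from the flat offset
# (262144 = 64*64*64 voxels per channel, 8 output channels), so each
# element receives its channel's bias.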
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 262144 % 8
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 1024, 64, 64, 64), (268435456, 262144,
4096, 64, 1))
assert_size_stride(primals_2, (8, 1024, 1, 1, 1), (1024, 1, 1, 1, 1))
assert_size_stride(primals_3, (8,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 64, 64, 64), (2097152, 262144, 4096,
64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(8388608)](buf1, primals_3,
8388608, XBLOCK=512, num_warps=8, num_stages=1)
del primals_3
return buf1, primals_1, primals_2
class Unit3D(nn.Module):
"""Basic unit containing Conv3D + BatchNorm + non-linearity."""
def __init__(self, in_channels, output_channels, kernel_shape=(1, 1, 1),
stride=(1, 1, 1), padding=0, activation_fn=F.relu, use_batch_norm=
True, use_bias=False, name='unit_3d'):
"""Initializes Unit3D module."""
super(Unit3D, self).__init__()
self._output_channels = output_channels
self._kernel_shape = kernel_shape
self._stride = stride
self._use_batch_norm = use_batch_norm
self._activation_fn = activation_fn
self._use_bias = use_bias
self.name = name
self.padding = padding
self.conv3d = nn.Conv3d(in_channels=in_channels, out_channels=self.
_output_channels, kernel_size=self._kernel_shape, stride=self.
_stride, padding=0, bias=self._use_bias)
if self._use_batch_norm:
self.bn = nn.BatchNorm3d(self._output_channels, eps=0.001,
momentum=0.01)
def compute_pad(self, dim, s):
"""Get the zero padding number."""
if s % self._stride[dim] == 0:
return max(self._kernel_shape[dim] - self._stride[dim], 0)
else:
return max(self._kernel_shape[dim] - s % self._stride[dim], 0)
def forward(self, x):
"""
        Connects the module to inputs, dynamically padding based on the input size in the forward pass.
Args:
x: Inputs to the Unit3D component.
Returns:
Outputs from the module.
"""
_batch, _channel, time, height, width = x.size()
pad_t = self.compute_pad(0, time)
pad_h = self.compute_pad(1, height)
pad_w = self.compute_pad(2, width)
pad_t_front = pad_t // 2
pad_t_back = pad_t - pad_t_front
pad_h_front = pad_h // 2
pad_h_back = pad_h - pad_h_front
pad_w_front = pad_w // 2
pad_w_back = pad_w - pad_w_front
pad = (pad_w_front, pad_w_back, pad_h_front, pad_h_back,
pad_t_front, pad_t_back)
x = F.pad(x, pad)
x = self.conv3d(x)
if self._use_batch_norm:
x = self.bn(x)
if self._activation_fn is not None:
x = self._activation_fn(x)
return x
class ClassNetVideoConvNew(nn.Module):
"""Classifier network for video input refer to MMSADA.
Args:
input_size (int, optional): the dimension of the final feature vector. Defaults to 1024.
n_class (int, optional): the number of classes. Defaults to 8.
References:
        Munro, Jonathan, and Dima Damen. "Multi-modal domain adaptation for fine-grained action recognition."
In CVPR, pp. 122-132. 2020.
"""
def __init__(self, input_size=1024, n_class=8):
super(ClassNetVideoConvNew, self).__init__()
self.dp = nn.Dropout()
self.logits = Unit3D(in_channels=input_size, output_channels=
n_class, kernel_shape=[1, 1, 1], padding=0, activation_fn=None,
use_batch_norm=False, use_bias=True)
def forward(self, input_0):
primals_2 = self.logits.conv3d.weight
primals_3 = self.logits.conv3d.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| SheffieldAI/pykale | ClassNetVideoConv | false | 16,858 | [
"MIT"
] | 324 | be7670941fb06835883c80477b26702d407017db | https://github.com/SheffieldAI/pykale/tree/be7670941fb06835883c80477b26702d407017db |
single_param | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/5f/c5fznegpftron72u7wabsoceikv45hwkhtip5z5dlcad6n2y2vqx.py
# Topologically Sorted Source Nodes: [abs_1], Original ATen: [aten.abs]
# Source node to ATen node mapping:
# abs_1 => abs_1
# Graph fragment:
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%primals_1,), kwargs = {})
triton_poi_fused_abs_0 = async_compile.triton('triton_poi_fused_abs_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl_math.abs(tmp1)
tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [abs_1], Original ATen: [aten.abs]
stream0 = get_raw_stream(0)
triton_poi_fused_abs_0.run(primals_1, buf0, 1, grid=grid(1), stream=stream0)
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.distributions
class single_param(nn.Module):
def __init__(self, value):
super(single_param, self).__init__()
self.p = nn.Parameter(torch.FloatTensor([value]))
def forward(self):
return torch.abs(self.p)
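# Usage sketch (illustrative): single_param(-2.5)() returns tensor([2.5]);
# torch.abs keeps the effective value non-negative while the underlying
# parameter remains unconstrained during optimization.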
def get_inputs():
return []
def get_init_inputs():
return [[], {'value': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl_math.abs(tmp1)
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp2, None)
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1,), (1,), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_0[grid(1)](primals_1, buf0, 1, XBLOCK=1,
num_warps=1, num_stages=1)
return buf0, primals_1
class single_paramNew(nn.Module):
def __init__(self, value):
super(single_paramNew, self).__init__()
self.p = nn.Parameter(torch.FloatTensor([value]))
def forward(self):
primals_1 = self.p
output = call([primals_1])
return output[0]
| AaltoML/PeriodicBNN | single_param | false | 16,859 | [
"MIT"
] | 9 | 1638edb365641e7fe2ea2ab3c15b9439473f9cf3 | https://github.com/AaltoML/PeriodicBNN/tree/1638edb365641e7fe2ea2ab3c15b9439473f9cf3 |
VertexDirectEmbedder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/un/cun734f5enrc2ri2vvbj5kc7nckuevxwwf4pdpdexgigc4qzdubh.py
# Topologically Sorted Source Nodes: [norm, clamp, truediv], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div]
# Source node to ATen node mapping:
# clamp => clamp_min
# norm => pow_1, pow_2, sum_1
# truediv => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %clamp_min), kwargs = {})
triton_poi_fused_clamp_div_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_clamp_div_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm, clamp, truediv], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_div_linalg_vector_norm_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch import nn
import pickle  # needed by VertexDirectEmbedder.load; missing from the original snippet
def normalize_embeddings(embeddings: 'torch.Tensor', epsilon: 'float'=1e-06
) ->torch.Tensor:
"""
Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]
Args:
embeddings (tensor [N, D]): N D-dimensional embedding vectors
epsilon (float): minimum value for a vector norm
Return:
Normalized embeddings (tensor [N, D]), such that L2 vector norms are all equal to 1.
"""
return embeddings / torch.clamp(embeddings.norm(p=None, dim=1, keepdim=
True), min=epsilon)
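# Hedged example (values illustrative):
#   normalize_embeddings(torch.tensor([[3., 4.], [0., 0.]]))
# returns rows [0.6, 0.8] and [0., 0.]; the zero row stays zero because its
# norm is clamped to epsilon instead of dividing by zero.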
class VertexDirectEmbedder(nn.Module):
"""
Class responsible for embedding vertices. Vertex embeddings take
the form of a tensor of size [N, D], where
N = number of vertices
D = number of dimensions in the embedding space
"""
def __init__(self, num_vertices: 'int', embed_dim: 'int'):
"""
Initialize embedder, set random embeddings
Args:
num_vertices (int): number of vertices to embed
embed_dim (int): number of dimensions in the embedding space
"""
super(VertexDirectEmbedder, self).__init__()
self.embeddings = nn.Parameter(torch.Tensor(num_vertices, embed_dim))
self.reset_parameters()
@torch.no_grad()
def reset_parameters(self):
"""
Reset embeddings to random values
"""
torch.nn.init.uniform_(self.embeddings, a=-0.5, b=0.5)
def forward(self) ->torch.Tensor:
"""
Produce vertex embeddings, a tensor of shape [N, D] where:
N = number of vertices
D = number of dimensions in the embedding space
Return:
Full vertex embeddings, a tensor of shape [N, D]
"""
return normalize_embeddings(self.embeddings)
@torch.no_grad()
def load(self, fpath: 'str'):
"""
Load data from a file
Args:
fpath (str): file path to load data from
"""
with PathManager.open(fpath, 'rb') as hFile:
data = pickle.load(hFile)
for name in ['embeddings']:
if name in data:
getattr(self, name).copy_(torch.tensor(data[name]).float())
def get_inputs():
return []
def get_init_inputs():
return [[], {'num_vertices': 4, 'embed_dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
from torch import nn
import pickle  # needed by VertexDirectEmbedder.load; missing from the original snippet
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
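# Each lane handles one element x2 of the 4x4 matrix; x1 = xindex // 4 is its
# row, and the four evict_last loads reload that row's entries to recompute the
# row's L2 norm, clamped at 1e-6, before dividing.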
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_linalg_vector_norm_0[grid(16)](primals_1,
buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
return buf0, primals_1
def normalize_embeddings(embeddings: 'torch.Tensor', epsilon: 'float'=1e-06
) ->torch.Tensor:
"""
Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]
Args:
embeddings (tensor [N, D]): N D-dimensional embedding vectors
epsilon (float): minimum value for a vector norm
Return:
Normalized embeddings (tensor [N, D]), such that L2 vector norms are all equal to 1.
"""
return embeddings / torch.clamp(embeddings.norm(p=None, dim=1, keepdim=
True), min=epsilon)
class VertexDirectEmbedderNew(nn.Module):
"""
Class responsible for embedding vertices. Vertex embeddings take
the form of a tensor of size [N, D], where
N = number of vertices
D = number of dimensions in the embedding space
"""
def __init__(self, num_vertices: 'int', embed_dim: 'int'):
"""
Initialize embedder, set random embeddings
Args:
num_vertices (int): number of vertices to embed
embed_dim (int): number of dimensions in the embedding space
"""
super(VertexDirectEmbedderNew, self).__init__()
self.embeddings = nn.Parameter(torch.Tensor(num_vertices, embed_dim))
self.reset_parameters()
@torch.no_grad()
def reset_parameters(self):
"""
Reset embeddings to random values
"""
torch.nn.init.uniform_(self.embeddings, a=-0.5, b=0.5)
@torch.no_grad()
def load(self, fpath: 'str'):
"""
Load data from a file
Args:
fpath (str): file path to load data from
"""
with PathManager.open(fpath, 'rb') as hFile:
data = pickle.load(hFile)
for name in ['embeddings']:
if name in data:
getattr(self, name).copy_(torch.tensor(data[name]).float())
def forward(self):
primals_1 = self.embeddings
output = call([primals_1])
return output[0]
| AbirKhan96/facebook-detectron2 | VertexDirectEmbedder | false | 16,860 | [
"Apache-2.0"
] | 5 | 6a3bf813353d74bbeb8674e3566e7bbb33eb5c87 | https://github.com/AbirKhan96/facebook-detectron2/tree/6a3bf813353d74bbeb8674e3566e7bbb33eb5c87 |
IIDIsotropicGaussianUVLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/57/c57fbdxlpuego6d4edvsufdvj2wgfjgio4escf24ktyiq3s7ahqd.py
# Topologically Sorted Source Nodes: [softplus, sigma2, log, mul, add_2, sub, pow_1, sub_1, pow_2, delta_t_delta, truediv, add_3, loss, sum_1], Original ATen: [aten.softplus, aten.add, aten.log, aten.mul, aten.sub, aten.pow, aten.div, aten.sum]
# Source node to ATen node mapping:
# add_2 => add_2
# add_3 => add_3
# delta_t_delta => add_1
# log => log
# loss => mul_1
# mul => mul
# pow_1 => pow_1
# pow_2 => pow_2
# sigma2 => add
# softplus => exp, gt, log1p, where
# sub => sub
# sub_1 => sub_1
# sum_1 => sum_1
# truediv => div
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 20), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %log1p), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, 4), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log, 2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1.8378770664093453), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg2_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg3_1, %arg4_1), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %add), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %div), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, 0.5), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {6: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=(6,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp13 = tl.load(in_ptr1 + (r0), None)
tmp14 = tl.load(in_ptr2 + (r0), None)
tmp17 = tl.load(in_ptr3 + (r0), None)
tmp18 = tl.load(in_ptr4 + (r0), None)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = 4.0
tmp7 = tmp5 + tmp6
tmp8 = tl_math.log(tmp7)
tmp9 = 2.0
tmp10 = tmp8 * tmp9
tmp11 = 1.8378770664093453
tmp12 = tmp10 + tmp11
tmp15 = tmp13 - tmp14
tmp16 = tmp15 * tmp15
tmp19 = tmp17 - tmp18
tmp20 = tmp19 * tmp19
tmp21 = tmp16 + tmp20
tmp22 = tmp21 / tmp7
tmp23 = tmp12 + tmp22
tmp24 = 0.5
tmp25 = tmp23 * tmp24
tmp26 = tl.broadcast_to(tmp25, [RBLOCK])
tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0))
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp28, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [softplus, sigma2, log, mul, add_2, sub, pow_1, sub_1, pow_2, delta_t_delta, truediv, add_3, loss, sum_1], Original ATen: [aten.softplus, aten.add, aten.log, aten.mul, aten.sub, aten.pow, aten.div, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0.run(arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, buf0, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg4_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.utils.data
import torch.nn.functional as F
from torch import nn
class IIDIsotropicGaussianUVLoss(nn.Module):
"""
Loss for the case of iid residuals with isotropic covariance:
$Sigma_i = sigma_i^2 I$
The loss (negative log likelihood) is then:
$1/2 sum_{i=1}^n (log(2 pi) + 2 log sigma_i^2 + ||delta_i||^2 / sigma_i^2)$,
where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
difference between estimated and ground truth UV values
For details, see:
N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
"""
def __init__(self, sigma_lower_bound: 'float'):
super(IIDIsotropicGaussianUVLoss, self).__init__()
self.sigma_lower_bound = sigma_lower_bound
self.log2pi = math.log(2 * math.pi)
def forward(self, u: 'torch.Tensor', v: 'torch.Tensor', sigma_u:
'torch.Tensor', target_u: 'torch.Tensor', target_v: 'torch.Tensor'):
sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound
delta_t_delta = (u - target_u) ** 2 + (v - target_v) ** 2
loss = 0.5 * (self.log2pi + 2 * torch.log(sigma2) + delta_t_delta /
sigma2)
return loss.sum()
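# Hedged numeric check (values approximate): with sigma_lower_bound=4 and
# sigma_u=0, softplus(0) = ln 2 ~= 0.6931, so sigma2 ~= 4.6931; for zero UV
# residuals the per-element loss is 0.5 * (log(2*pi) + 2*log(4.6931)) ~= 2.465.
# The constant 1.8378770664093453 hard-coded in the fused kernel is log(2*pi).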
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'sigma_lower_bound': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp13 = tl.load(in_ptr1 + r0, None)
tmp14 = tl.load(in_ptr2 + r0, None)
tmp17 = tl.load(in_ptr3 + r0, None)
tmp18 = tl.load(in_ptr4 + r0, None)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = 4.0
tmp7 = tmp5 + tmp6
tmp8 = tl_math.log(tmp7)
tmp9 = 2.0
tmp10 = tmp8 * tmp9
tmp11 = 1.8378770664093453
tmp12 = tmp10 + tmp11
tmp15 = tmp13 - tmp14
tmp16 = tmp15 * tmp15
tmp19 = tmp17 - tmp18
tmp20 = tmp19 * tmp19
tmp21 = tmp16 + tmp20
tmp22 = tmp21 / tmp7
tmp23 = tmp12 + tmp22
tmp24 = 0.5
tmp25 = tmp23 * tmp24
tmp26 = tl.broadcast_to(tmp25, [RBLOCK])
tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp28, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0[grid(1)](arg0_1
, arg1_1, arg2_1, arg3_1, arg4_1, buf0, 1, 256, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
return buf0,
class IIDIsotropicGaussianUVLossNew(nn.Module):
"""
Loss for the case of iid residuals with isotropic covariance:
$Sigma_i = sigma_i^2 I$
The loss (negative log likelihood) is then:
$1/2 sum_{i=1}^n (log(2 pi) + 2 log sigma_i^2 + ||delta_i||^2 / sigma_i^2)$,
where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
difference between estimated and ground truth UV values
For details, see:
N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
"""
def __init__(self, sigma_lower_bound: 'float'):
super(IIDIsotropicGaussianUVLossNew, self).__init__()
self.sigma_lower_bound = sigma_lower_bound
self.log2pi = math.log(2 * math.pi)
def forward(self, input_0, input_1, input_2, input_3, input_4):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
arg4_1 = input_4
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1])
return output[0]
| AbirKhan96/facebook-detectron2 | IIDIsotropicGaussianUVLoss | false | 16,861 | [
"Apache-2.0"
] | 5 | 6a3bf813353d74bbeb8674e3566e7bbb33eb5c87 | https://github.com/AbirKhan96/facebook-detectron2/tree/6a3bf813353d74bbeb8674e3566e7bbb33eb5c87 |
LastLevelMaxPool | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/6d/c6d6s7dv3ssfl24e66nrldm3hrtoced4owhc56tooquatdvctqfh.py
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn.functional as F
from torch import nn
class LastLevelMaxPool(nn.Module):
"""
This module is used in the original FPN to generate a downsampled
P6 feature from P5.
"""
def __init__(self):
super().__init__()
self.num_levels = 1
self.in_feature = 'p5'
def forward(self, x):
return [F.max_pool2d(x, kernel_size=1, stride=2, padding=0)]
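# Example: kernel_size=1 with stride=2 simply subsamples every other row and
# column, so a [4, 4, 4, 4] input becomes [4, 4, 2, 2] -- exactly what the
# compiled Triton kernel in this record implements with its strided loads.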
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class LastLevelMaxPoolNew(nn.Module):
"""
This module is used in the original FPN to generate a downsampled
P6 feature from P5.
"""
def __init__(self):
super().__init__()
self.num_levels = 1
self.in_feature = 'p5'
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| AbirKhan96/facebook-detectron2 | LastLevelMaxPool | false | 16,862 | [
"Apache-2.0"
] | 5 | 6a3bf813353d74bbeb8674e3566e7bbb33eb5c87 | https://github.com/AbirKhan96/facebook-detectron2/tree/6a3bf813353d74bbeb8674e3566e7bbb33eb5c87 |
HardSigmoid | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/in/cinpnfhrionin5gpquwoph6guoa32baao7labi6wulqruki5b6sc.py
# Topologically Sorted Source Nodes: [add, relu6, truediv], Original ATen: [aten.add, aten.hardtanh, aten.div]
# Source node to ATen node mapping:
# add => add
# relu6 => clamp_max, clamp_min
# truediv => div
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3.0), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6.0), kwargs = {})
triton_poi_fused_add_div_hardtanh_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, relu6, truediv], Original ATen: [aten.add, aten.hardtanh, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.utils.data.distributed
from torch import nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
def hard_sigmoid(input_, inplace: 'bool'=False):
"""hard sigmoid function"""
if inplace:
return input_.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
return F.relu6(input_ + 3.0) / 6.0
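# Example values (illustrative):
#   hard_sigmoid(torch.tensor([-3., 0., 3.]))  # -> tensor([0.0, 0.5, 1.0])
# The compiled kernel multiplies by 0.16666666666666666 (i.e. 1/6) rather than
# dividing, which is equivalent here.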
class HardSigmoid(nn.Module):
"""hard sigmoid module"""
def __init__(self, inplace: 'bool'=False):
super().__init__()
self.inplace = inplace
def forward(self, input_):
return hard_sigmoid(input_, self.inplace)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.utils.data.distributed
from torch import nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def hard_sigmoid(input_, inplace: 'bool'=False):
"""hard sigmoid function"""
if inplace:
return input_.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
return F.relu6(input_ + 3.0) / 6.0
class HardSigmoidNew(nn.Module):
"""hard sigmoid module"""
def __init__(self, inplace: 'bool'=False):
super().__init__()
self.inplace = inplace
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Adlik/zen_nas | HardSigmoid | false | 16,863 | [
"Apache-2.0"
] | 7 | d820d5c7d5bbb6fd66a76d5f16513647d6ea7a57 | https://github.com/Adlik/zen_nas/tree/d820d5c7d5bbb6fd66a76d5f16513647d6ea7a57 |
ResizeTransform | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/hl/chlilsvuwckmguymgdsz3emilese4fxitlrwtijcidjhwlmxq66x.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
# Source node to ATen node mapping:
# x => _unsafe_index, _unsafe_index_1, add_1, clamp_max_1, clamp_min, clamp_min_1, convert_element_type, convert_element_type_1, iota, mul, mul_1, sub, sub_1
# x_1 => mul_2
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (1,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 0), kwargs = {})
# %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul, 0.0), kwargs = {})
# %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min, torch.int64), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max]), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1]), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_1), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %clamp_max_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 1.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.25), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 - tmp0
tmp3 = 0.0
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp6 = 0.25
tmp7 = tmp5 * tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as nnf
import torch.utils
class ResizeTransform(nn.Module):
"""
Resize a transform, which involves resizing the vector field *and* rescaling it.
"""
def __init__(self, vel_resize, ndims):
super().__init__()
self.factor = 1.0 / vel_resize
self.mode = 'linear'
if ndims == 2:
self.mode = 'bi' + self.mode
elif ndims == 3:
self.mode = 'tri' + self.mode
def forward(self, x):
if self.factor < 1:
x = nnf.interpolate(x, align_corners=True, scale_factor=self.
factor, mode=self.mode)
x = self.factor * x
elif self.factor > 1:
x = self.factor * x
x = nnf.interpolate(x, align_corners=True, scale_factor=self.
factor, mode=self.mode)
return x
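# Hedged example: with vel_resize=4 the factor is 0.25 (< 1), so the field is
# first resized by 0.25 and then multiplied by 0.25, keeping displacement
# magnitudes consistent with the coarser grid. Note that ndims=4 (as in
# get_init_inputs) matches neither branch, leaving mode='linear' for the
# 1-D sample input.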
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'vel_resize': 4, 'ndims': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 - tmp0
    tmp3 = 0.0  # linear-interp weight; align_corners with output length 1 folds to index 0
    tmp4 = tmp2 * tmp3
    tmp5 = tmp0 + tmp4  # interpolated value == x[..., 0]
    tmp6 = 0.25  # self.factor = 1.0 / vel_resize = 1/4
    tmp7 = tmp5 * tmp6  # rescale the vector field by the same factor
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class ResizeTransformNew(nn.Module):
"""
Resize a transform, which involves resizing the vector field *and* rescaling it.
"""
def __init__(self, vel_resize, ndims):
super().__init__()
self.factor = 1.0 / vel_resize
self.mode = 'linear'
if ndims == 2:
self.mode = 'bi' + self.mode
elif ndims == 3:
self.mode = 'tri' + self.mode
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Alison-brie/AutoReg | ResizeTransform | false | 16,864 | [
"MIT"
] | 10 | a23d45a6f7c6e47f61430e1565dda316452a4418 | https://github.com/Alison-brie/AutoReg/tree/a23d45a6f7c6e47f61430e1565dda316452a4418 |
Conv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/gt/cgtkxck7xi4jvt5w42qtfq4jx6nnr2ig32ijc37sud5zoaiejz2w.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
return (buf1, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn.functional as F
class Conv2d(torch.nn.Conv2d):
"""
A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features.
"""
def __init__(self, *args, **kwargs):
"""
Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`:
Args:
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
        It is assumed that the norm layer is applied before the activation.
"""
norm = kwargs.pop('norm', None)
activation = kwargs.pop('activation', None)
super().__init__(*args, **kwargs)
self.norm = norm
self.activation = activation
def forward(self, x):
if not torch.jit.is_scripting():
if x.numel() == 0 and self.training:
                assert not isinstance(self.norm, torch.nn.SyncBatchNorm), \
                    'SyncBatchNorm does not support empty inputs!'
x = F.conv2d(x, self.weight, self.bias, self.stride, self.padding,
self.dilation, self.groups)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
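# Minimal usage sketch (illustrative): the extra kwargs are popped before the
# base nn.Conv2d constructor runs, and norm is applied before activation.
def _example_conv2d_wrapper():
    conv = Conv2d(4, 4, kernel_size=4, norm=torch.nn.BatchNorm2d(4),
        activation=F.relu)
    y = conv(torch.rand(4, 4, 4, 4))
    assert y.shape == (4, 4, 1, 1)
    return y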
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16)](buf1, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return buf1, primals_1, primals_2
class Conv2dNew(torch.nn.Conv2d):
"""
A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features.
"""
def __init__(self, *args, **kwargs):
"""
Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`:
Args:
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
        It is assumed that the norm layer is applied before the activation.
"""
norm = kwargs.pop('norm', None)
activation = kwargs.pop('activation', None)
super().__init__(*args, **kwargs)
self.norm = norm
self.activation = activation
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| AbirKhan96/facebook-detectron2 | Conv2d | false | 16,865 | [
"Apache-2.0"
] | 5 | 6a3bf813353d74bbeb8674e3566e7bbb33eb5c87 | https://github.com/AbirKhan96/facebook-detectron2/tree/6a3bf813353d74bbeb8674e3566e7bbb33eb5c87 |
Linear_softmax | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/kq/ckqglep2uxxmtpvso6erxkbhcljhsx4d2chzqc2ps72gtorqejiz.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)  # running max over the four dim-1 slices
    tmp8 = tmp0 - tmp7  # subtract the row max for numerical stability
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/tv/ctvxncyzu7pcogaepwuywlkrtigk7ke4tdgbkdfypvu626jp7xvj.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6  # sum of exponentials over the four dim-1 slices
    tmp8 = tmp0 / tmp7  # normalize to probabilities
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf1
return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
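# Hedged cross-check (not part of the generated module): the two kernels above
# implement a numerically stable softmax over dim=1 of the (4, 4, 4, 4) view,
# so the compiled output should match eager. Assumes a CUDA device.
def _sanity_check_linear_softmax():
    w = torch.rand(4, 4, device='cuda')
    b = torch.rand(4, device='cuda')
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = call([w.clone(), b.clone(), x.clone()])[0]
    ref = torch.softmax(torch.nn.functional.linear(x, w, b), dim=1)
    assert torch.allclose(out, ref, atol=1e-6)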
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Linear_softmax(nn.Module):
def __init__(self, inp, out):
super(Linear_softmax, self).__init__()
self.f1 = nn.Linear(inp, out)
def forward(self, x):
x = self.f1(x)
return F.softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inp': 4, 'out': 4}]
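# Minimal usage sketch (illustrative): softmax is taken over dim=1, so the
# four channel slices at each (batch, h, w) position sum to one.
def _example_linear_softmax():
    model = Linear_softmax(inp=4, out=4)
    probs = model(torch.rand(4, 4, 4, 4))
    assert torch.allclose(probs.sum(dim=1), torch.ones(4, 4, 4))
    return probs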
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)  # running max over the four dim-1 slices
    tmp8 = tmp0 - tmp7  # subtract the row max for numerical stability
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6  # sum of exponentials over the four dim-1 slices
    tmp8 = tmp0 / tmp7  # normalize to probabilities
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf1
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class Linear_softmaxNew(nn.Module):
def __init__(self, inp, out):
super(Linear_softmaxNew, self).__init__()
self.f1 = nn.Linear(inp, out)
def forward(self, input_0):
primals_1 = self.f1.weight
primals_2 = self.f1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Alfo5123/ConcreteDropout | Linear_softmax | false | 16,866 | [
"MIT"
] | 7 | c442871553e20a2de078c0fbac7fa52302d50abf | https://github.com/Alfo5123/ConcreteDropout/tree/c442871553e20a2de078c0fbac7fa52302d50abf |
IndepAnisotropicGaussianUVLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/2z/c2z34bxef3s6z25acg32xjyk7vip7zho4eymd7dqa74zlzd5t6fu.py
# Topologically Sorted Source Nodes: [softplus, sigma2, pow_1, pow_2, r_sqnorm2, add_4, denom2, log, add_5, delta_u, pow_3, delta_v, pow_4, delta_sqnorm, truediv, add_6, delta_u_r_u, delta_v_r_v, delta_r, delta_r_sqnorm, truediv_1, sub_2, loss, sum_1], Original ATen: [aten.softplus, aten.add, aten.pow, aten.mul, aten.log, aten.sub, aten.div, aten.sum]
# Source node to ATen node mapping:
# add_4 => add_4
# add_5 => add_5
# add_6 => add_6
# delta_r => add_3
# delta_r_sqnorm => pow_5
# delta_sqnorm => add_2
# delta_u => sub
# delta_u_r_u => mul
# delta_v => sub_1
# delta_v_r_v => mul_1
# denom2 => mul_2
# log => log
# loss => mul_3
# pow_1 => pow_1
# pow_2 => pow_2
# pow_3 => pow_3
# pow_4 => pow_4
# r_sqnorm2 => add_1
# sigma2 => add
# softplus => exp, gt, log1p, where
# sub_2 => sub_2
# sum_1 => sum_1
# truediv => div
# truediv_1 => div_1
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 20), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %log1p), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, 4), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg2_1, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %add_1), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %add_4), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%mul_2,), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log, 1.8378770664093453), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg3_1, %arg4_1), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg5_1, %arg6_1), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_3, %pow_4), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, %add), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %div), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg2_1), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_3, 2), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_5, %mul_2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_6, %div_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, 0.5), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_3,), kwargs = {})
triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {8: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 9), equal_to_1=(8,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 7, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp8 = tl.load(in_ptr1 + (r0), None)
tmp10 = tl.load(in_ptr2 + (r0), None)
tmp18 = tl.load(in_ptr3 + (r0), None)
tmp19 = tl.load(in_ptr4 + (r0), None)
tmp22 = tl.load(in_ptr5 + (r0), None)
tmp23 = tl.load(in_ptr6 + (r0), None)
    tmp1 = 20.0  # softplus threshold: above it, pass x through unchanged
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    tmp5 = tl.where(tmp2, tmp0, tmp4)  # softplus(sigma_u)
    tmp6 = 4.0  # sigma_lower_bound
    tmp7 = tmp5 + tmp6  # sigma2
tmp9 = tmp8 * tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = tmp7 + tmp12
tmp14 = tmp7 * tmp13
tmp15 = tl_math.log(tmp14)
    tmp16 = 1.8378770664093453  # log(2 * pi)
    tmp17 = tmp15 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 * tmp20
tmp24 = tmp22 - tmp23
tmp25 = tmp24 * tmp24
tmp26 = tmp21 + tmp25
tmp27 = tmp26 / tmp7
tmp28 = tmp17 + tmp27
tmp29 = tmp20 * tmp8
tmp30 = tmp24 * tmp10
tmp31 = tmp29 + tmp30
tmp32 = tmp31 * tmp31
tmp33 = tmp32 / tmp14
tmp34 = tmp28 - tmp33
tmp35 = 0.5
tmp36 = tmp34 * tmp35
tmp37 = tl.broadcast_to(tmp36, [RBLOCK])
tmp39 = triton_helpers.promote_to_tensor(tl.sum(tmp37, 0))
tl.store(out_ptr1 + (tl.full([1], 0, tl.int32)), tmp39, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg6_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [softplus, sigma2, pow_1, pow_2, r_sqnorm2, add_4, denom2, log, add_5, delta_u, pow_3, delta_v, pow_4, delta_sqnorm, truediv, add_6, delta_u_r_u, delta_v_r_v, delta_r, delta_r_sqnorm, truediv_1, sub_2, loss, sum_1], Original ATen: [aten.softplus, aten.add, aten.pow, aten.mul, aten.log, aten.sub, aten.div, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0.run(arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, buf1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
del arg5_1
del arg6_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg4_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg5_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg6_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
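# Hedged cross-check (not part of the generated module): the fused reduction
# above should agree with the eager loss for sigma_lower_bound=4. The argument
# order follows the kernel's pointers: sigma_u, kappa_u, kappa_v, u, target_u,
# v, target_v. Assumes a CUDA device.
def _sanity_check_aniso_uv_loss():
    import torch.nn.functional as F
    s, ku, kv, u, tu, v, tv = [torch.rand(4, 4, 4, 4, device='cuda')
        for _ in range(7)]
    out, = call([t.clone() for t in (s, ku, kv, u, tu, v, tv)])
    sigma2 = F.softplus(s) + 4.0
    denom2 = sigma2 * (sigma2 + ku ** 2 + kv ** 2)
    du, dv = u - tu, v - tv
    ref = (0.5 * (math.log(2 * math.pi) + torch.log(denom2)
        + (du ** 2 + dv ** 2) / sigma2
        - (du * ku + dv * kv) ** 2 / denom2)).sum()
    assert torch.allclose(out, ref, atol=1e-4)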
| import math
import torch
import torch.utils.data
import torch.nn.functional as F
from torch import nn
class IndepAnisotropicGaussianUVLoss(nn.Module):
"""
Loss for the case of independent residuals with anisotropic covariances:
$Sigma_i = sigma_i^2 I + r_i r_i^T$
The loss (negative log likelihood) is then:
$1/2 sum_{i=1}^n (log(2 pi)
+ log sigma_i^2 (sigma_i^2 + ||r_i||^2)
+ ||delta_i||^2 / sigma_i^2
- <delta_i, r_i>^2 / (sigma_i^2 * (sigma_i^2 + ||r_i||^2)))$,
where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
difference between estimated and ground truth UV values
For details, see:
N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
"""
def __init__(self, sigma_lower_bound: 'float'):
super(IndepAnisotropicGaussianUVLoss, self).__init__()
self.sigma_lower_bound = sigma_lower_bound
self.log2pi = math.log(2 * math.pi)
def forward(self, u: 'torch.Tensor', v: 'torch.Tensor', sigma_u:
'torch.Tensor', kappa_u_est: 'torch.Tensor', kappa_v_est:
'torch.Tensor', target_u: 'torch.Tensor', target_v: 'torch.Tensor'):
sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound
r_sqnorm2 = kappa_u_est ** 2 + kappa_v_est ** 2
delta_u = u - target_u
delta_v = v - target_v
delta_sqnorm = delta_u ** 2 + delta_v ** 2
delta_u_r_u = delta_u * kappa_u_est
delta_v_r_v = delta_v * kappa_v_est
delta_r = delta_u_r_u + delta_v_r_v
delta_r_sqnorm = delta_r ** 2
denom2 = sigma2 * (sigma2 + r_sqnorm2)
loss = 0.5 * (self.log2pi + torch.log(denom2) + delta_sqnorm /
sigma2 - delta_r_sqnorm / denom2)
return loss.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'sigma_lower_bound': 4}]
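# Minimal usage sketch (illustrative): all seven inputs share one shape and
# the loss reduces to a single scalar via .sum().
def _example_aniso_uv_loss():
    loss_fn = IndepAnisotropicGaussianUVLoss(sigma_lower_bound=4)
    u, v, sigma_u, ku, kv, tu, tv = [torch.rand(4, 4, 4, 4) for _ in range(7)]
    loss = loss_fn(u, v, sigma_u, ku, kv, tu, tv)
    assert loss.dim() == 0
    return loss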
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr1, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp8 = tl.load(in_ptr1 + r0, None)
tmp10 = tl.load(in_ptr2 + r0, None)
tmp18 = tl.load(in_ptr3 + r0, None)
tmp19 = tl.load(in_ptr4 + r0, None)
tmp22 = tl.load(in_ptr5 + r0, None)
tmp23 = tl.load(in_ptr6 + r0, None)
    tmp1 = 20.0  # softplus threshold: above it, pass x through unchanged
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    tmp5 = tl.where(tmp2, tmp0, tmp4)  # softplus(sigma_u)
    tmp6 = 4.0  # sigma_lower_bound
    tmp7 = tmp5 + tmp6  # sigma2
tmp9 = tmp8 * tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = tmp7 + tmp12
tmp14 = tmp7 * tmp13
tmp15 = tl_math.log(tmp14)
    tmp16 = 1.8378770664093453  # log(2 * pi)
    tmp17 = tmp15 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 * tmp20
tmp24 = tmp22 - tmp23
tmp25 = tmp24 * tmp24
tmp26 = tmp21 + tmp25
tmp27 = tmp26 / tmp7
tmp28 = tmp17 + tmp27
tmp29 = tmp20 * tmp8
tmp30 = tmp24 * tmp10
tmp31 = tmp29 + tmp30
tmp32 = tmp31 * tmp31
tmp33 = tmp32 / tmp14
tmp34 = tmp28 - tmp33
tmp35 = 0.5
tmp36 = tmp34 * tmp35
tmp37 = tl.broadcast_to(tmp36, [RBLOCK])
tmp39 = triton_helpers.promote_to_tensor(tl.sum(tmp37, 0))
tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp39, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg6_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0[grid(1)](arg0_1
, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, buf1, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
del arg5_1
del arg6_1
return buf1,
class IndepAnisotropicGaussianUVLossNew(nn.Module):
"""
Loss for the case of independent residuals with anisotropic covariances:
$Sigma_i = sigma_i^2 I + r_i r_i^T$
The loss (negative log likelihood) is then:
$1/2 sum_{i=1}^n (log(2 pi)
+ log sigma_i^2 (sigma_i^2 + ||r_i||^2)
+ ||delta_i||^2 / sigma_i^2
- <delta_i, r_i>^2 / (sigma_i^2 * (sigma_i^2 + ||r_i||^2)))$,
where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
difference between estimated and ground truth UV values
For details, see:
N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
"""
def __init__(self, sigma_lower_bound: 'float'):
super(IndepAnisotropicGaussianUVLossNew, self).__init__()
self.sigma_lower_bound = sigma_lower_bound
self.log2pi = math.log(2 * math.pi)
def forward(self, input_0, input_1, input_2, input_3, input_4, input_5,
input_6):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
arg4_1 = input_4
arg5_1 = input_5
arg6_1 = input_6
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1])
return output[0]
| AbirKhan96/facebook-detectron2 | IndepAnisotropicGaussianUVLoss | false | 16,867 | [
"Apache-2.0"
] | 5 | 6a3bf813353d74bbeb8674e3566e7bbb33eb5c87 | https://github.com/AbirKhan96/facebook-detectron2/tree/6a3bf813353d74bbeb8674e3566e7bbb33eb5c87 |
TrueDynamics | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ir/cirovb3porneu367voo4wtxiil37cwmkxgvjtxyzpx3mpap54f7v.py
# Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%add_3, %clamp_max_1], 1), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 8
x0 = xindex % 4
x2 = (xindex // 32)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + (4*x1) + (64*x2)), tmp4 & xmask, other=0.0)
    tmp7 = 3.141592653589793  # pi
    tmp8 = tmp5 + tmp7
    tmp9 = tl_math.sin(tmp8)  # sin(th + pi)
    tmp10 = -14.73  # -3 * g / (2 * l) with g = 9.82, l = 1.0
    tmp11 = tmp9 * tmp10
    tmp12 = tl.load(in_ptr0 + (32 + x0 + (4*x1) + (64*x2)), tmp4 & xmask, other=0.0)
    tmp13 = -3.0
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = 3.0
    tmp16 = triton_helpers.minimum(tmp14, tmp15)  # u clamped to [-3, 3]
    tmp17 = tmp16 * tmp15  # 3.0 / (m * l**2) * u
    tmp18 = tmp11 + tmp17
    tmp19 = 0.08  # dt
    tmp20 = tmp18 * tmp19
tmp21 = tmp6 + tmp20
tmp22 = tmp21 * tmp19
tmp23 = tmp5 + tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp4, tmp23, tmp24)
tmp26 = tmp0 >= tmp3
tmp27 = tl.full([1], 8, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tl.load(in_ptr0 + (16 + x0 + (4*((-4) + x1)) + (64*x2)), tmp26 & xmask, other=0.0)
tmp30 = tl.load(in_ptr0 + (x0 + (4*((-4) + x1)) + (64*x2)), tmp26 & xmask, other=0.0)
tmp31 = tmp30 + tmp7
tmp32 = tl_math.sin(tmp31)
tmp33 = tmp32 * tmp10
tmp34 = tl.load(in_ptr0 + (32 + x0 + (4*((-4) + x1)) + (64*x2)), tmp26 & xmask, other=0.0)
tmp35 = triton_helpers.maximum(tmp34, tmp13)
tmp36 = triton_helpers.minimum(tmp35, tmp15)
tmp37 = tmp36 * tmp15
tmp38 = tmp33 + tmp37
tmp39 = tmp38 * tmp19
tmp40 = tmp29 + tmp39
tmp41 = -8.0
tmp42 = triton_helpers.maximum(tmp40, tmp41)
tmp43 = 8.0
tmp44 = triton_helpers.minimum(tmp42, tmp43)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp26, tmp44, tmp45)
tmp47 = tl.where(tmp4, tmp25, tmp46)
tl.store(out_ptr0 + (x3), tmp47, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack]
stream0 = get_raw_stream(0)
triton_poi_fused_stack_0.run(arg0_1, buf0, 128, grid=grid(128), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (4, 2, 4, 4), (32, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
class TrueDynamics(nn.Module):
def __init__(self, env, hidden_size=200, drop_prob=0.0):
super().__init__()
self.env = env
self.hidden_size = hidden_size
self.drop_prob = drop_prob
self.mask1 = None
def forward(self, x):
th = x[:, 0]
thdot = x[:, 1]
u = torch.clamp(x[:, 2], -3, 3)
g = 9.82
m = 1.0
l = 1.0
dt = 0.08
newthdot = thdot + (-3 * g / (2 * l) * torch.sin(th + np.pi) + 3.0 /
(m * l ** 2) * u) * dt
newth = th + newthdot * dt
newthdot = torch.clamp(newthdot, -8, 8)
return torch.stack([newth, newthdot], 1)
def set_sampling(self, sampling=None, batch_size=None):
if sampling is None:
raise ValueError('Sampling cannot be None.')
self.sampling = sampling
if self.sampling:
self.mask1 = Variable(torch.bernoulli(torch.zeros(batch_size,
self.hidden_size).fill_(1 - self.drop_prob)))
self.mask2 = Variable(torch.bernoulli(torch.zeros(batch_size,
self.hidden_size).fill_(1 - self.drop_prob)))
self.mask1 /= 1 - self.drop_prob
self.mask2 /= 1 - self.drop_prob
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'env': 4}]
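# Minimal usage sketch (illustrative; a 2-D state layout is assumed here,
# while get_inputs() above feeds a 4-D batch): rows hold [theta, theta_dot,
# u], and one Euler step returns stacked [new_theta, new_theta_dot].
def _example_true_dynamics():
    dyn = TrueDynamics(env=None)
    state = torch.rand(8, 3)
    nxt = dyn(state)
    assert nxt.shape == (8, 2)
    return nxt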
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 8
x0 = xindex % 4
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask,
other=0.0)
    tmp7 = 3.141592653589793  # pi
    tmp8 = tmp5 + tmp7
    tmp9 = tl_math.sin(tmp8)  # sin(th + pi)
    tmp10 = -14.73  # -3 * g / (2 * l) with g = 9.82, l = 1.0
    tmp11 = tmp9 * tmp10
    tmp12 = tl.load(in_ptr0 + (32 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask,
        other=0.0)
    tmp13 = -3.0
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = 3.0
    tmp16 = triton_helpers.minimum(tmp14, tmp15)  # u clamped to [-3, 3]
    tmp17 = tmp16 * tmp15  # 3.0 / (m * l**2) * u
    tmp18 = tmp11 + tmp17
    tmp19 = 0.08  # dt
tmp20 = tmp18 * tmp19
tmp21 = tmp6 + tmp20
tmp22 = tmp21 * tmp19
tmp23 = tmp5 + tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp4, tmp23, tmp24)
tmp26 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp29 = tl.load(in_ptr0 + (16 + x0 + 4 * (-4 + x1) + 64 * x2), tmp26 &
xmask, other=0.0)
tmp30 = tl.load(in_ptr0 + (x0 + 4 * (-4 + x1) + 64 * x2), tmp26 & xmask,
other=0.0)
tmp31 = tmp30 + tmp7
tmp32 = tl_math.sin(tmp31)
tmp33 = tmp32 * tmp10
tmp34 = tl.load(in_ptr0 + (32 + x0 + 4 * (-4 + x1) + 64 * x2), tmp26 &
xmask, other=0.0)
tmp35 = triton_helpers.maximum(tmp34, tmp13)
tmp36 = triton_helpers.minimum(tmp35, tmp15)
tmp37 = tmp36 * tmp15
tmp38 = tmp33 + tmp37
tmp39 = tmp38 * tmp19
tmp40 = tmp29 + tmp39
tmp41 = -8.0
tmp42 = triton_helpers.maximum(tmp40, tmp41)
tmp43 = 8.0
tmp44 = triton_helpers.minimum(tmp42, tmp43)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp26, tmp44, tmp45)
tmp47 = tl.where(tmp4, tmp25, tmp46)
tl.store(out_ptr0 + x3, tmp47, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 2, 4, 4), (32, 16, 4, 1), 0),
class TrueDynamicsNew(nn.Module):
def __init__(self, env, hidden_size=200, drop_prob=0.0):
super().__init__()
self.env = env
self.hidden_size = hidden_size
self.drop_prob = drop_prob
self.mask1 = None
def set_sampling(self, sampling=None, batch_size=None):
if sampling is None:
raise ValueError('Sampling cannot be None.')
self.sampling = sampling
if self.sampling:
self.mask1 = Variable(torch.bernoulli(torch.zeros(batch_size,
self.hidden_size).fill_(1 - self.drop_prob)))
self.mask2 = Variable(torch.bernoulli(torch.zeros(batch_size,
self.hidden_size).fill_(1 - self.drop_prob)))
self.mask1 /= 1 - self.drop_prob
self.mask2 /= 1 - self.drop_prob
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Alfo5123/ConcreteDropout | TrueDynamics | false | 16,868 | [
"MIT"
] | 7 | c442871553e20a2de078c0fbac7fa52302d50abf | https://github.com/Alfo5123/ConcreteDropout/tree/c442871553e20a2de078c0fbac7fa52302d50abf |
EqualConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ol/coljqhqn2ngky5ed74qawwhwl656ahahxt5nc7rr3rlliikrfrsw.py
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# weight => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.1767766952966369), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 0.1767766952966369  # sqrt(2 / fan_in) with fan_in = 4 * 4 * 4 = 64
    tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/vb/cvbno3dccglzmlbisnwicoai3aocrgweun3buh6avsdqdjjhjczh.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %mul, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
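        # Output is (4, 4, 1, 1): a 4x4 input convolved with a 4x4 kernel and no padding collapses to 1x1 spatially.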
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_2, 16, grid=grid(16), stream=stream0)
del primals_2
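    # buf2 holds the biased conv output; buf0 (the pre-scaled weight) and
    # primals_3 are returned alongside it, presumably for the backward graph.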
return (buf2, buf0, primals_3, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from math import sqrt
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
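        # fan_in = in_channels * kernel_h * kernel_w; the sqrt(2 / fan_in)
        # factor is the He-initialization gain, applied at every forward pass.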
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class EqualConv2d(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
conv = nn.Conv2d(*args, **kwargs)
conv.weight.data.normal_()
conv.bias.data.zero_()
self.conv = equal_lr(conv)
def forward(self, input):
return self.conv(input)
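# Illustrative check (added here, not part of the original source): the module
# should match a plain conv2d whose weight is weight_orig pre-scaled by
# sqrt(2 / fan_in); with in_channels=4 and kernel_size=4 that scale is
# sqrt(2 / 64) = 0.1767766952966369, the constant baked into the Triton kernel above.
def _check_equal_lr_scaling():
    conv = EqualConv2d(4, 4, 4)
    x = torch.rand(4, 4, 4, 4)
    scale = sqrt(2 / (4 * 4 * 4))
    ref = nn.functional.conv2d(x, conv.conv.weight_orig * scale, conv.conv.bias)
    assert torch.allclose(conv(x), ref)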
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from math import sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
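    # sqrt(2 / 64): the EqualLR weight scale, precomputed as a float literal.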
tmp1 = 0.1767766952966369
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
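    # x0 is the output-channel index; the bias is broadcast over the batch dimension.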
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
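        # Result is (4, 4, 1, 1): the 4x4 kernel consumes the full 4x4 input.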
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(16)](buf2, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf2, buf0, primals_3, buf0
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
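        # fan_in = in_channels * kernel_h * kernel_w (= 64 for a (4, 4, 4, 4) weight).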
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class EqualConv2dNew(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
conv = nn.Conv2d(*args, **kwargs)
conv.weight.data.normal_()
conv.bias.data.zero_()
self.conv = equal_lr(conv)
def forward(self, input_0):
primals_2 = self.conv.bias
primals_1 = self.conv.weight_orig
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
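# Hypothetical smoke test (not in the original row; requires a CUDA device):
# run the compiled module on random inputs and check the 1x1 spatial output.
def _demo_equal_conv2d_new():
    m = EqualConv2dNew(4, 4, 4).cuda()
    y = m(torch.rand(4, 4, 4, 4, device='cuda'))
    assert y.shape == (4, 4, 1, 1)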
| AaltoVision/balanced-pioneer | EqualConv2d | false | 16,869 | [
"MIT"
] | 5 | 51f58080fd2db3159de3e1ccb47f38e03220faf0 | https://github.com/AaltoVision/balanced-pioneer/tree/51f58080fd2db3159de3e1ccb47f38e03220faf0 |